diff options
309 files changed, 9502 insertions, 2576 deletions
diff --git a/client/go/.gitignore b/client/go/.gitignore index b35a2cef362..eb679add05e 100644 --- a/client/go/.gitignore +++ b/client/go/.gitignore @@ -1,4 +1,5 @@ bin/ +dist/ share/ !Makefile !build/ diff --git a/client/go/Makefile b/client/go/Makefile index 3297b628cb2..17748d765c8 100644 --- a/client/go/Makefile +++ b/client/go/Makefile @@ -1,23 +1,104 @@ # Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +# The version to release. Defaults to the current tag or revision. +# Use env VERSION=X.Y.Z make ... to override +VERSION ?= $(shell git describe --tags 2> /dev/null | sed -E "s/^vespa-|-1$$//g") +DEVEL_VERSION := $(shell echo "0.0.0-`git rev-parse --short HEAD`") +ifeq ($(VERSION),) + VERSION = $(DEVEL_VERSION) +endif + BIN ?= $(CURDIR)/bin SHARE ?= $(CURDIR)/share -# When building a new release the build system should set the VERSION -# environment variable to version being built -VERSION ?= $(shell echo "0.0.0-`git rev-parse --short HEAD`") +DIST ?= $(CURDIR)/dist + +GO_FLAGS := -ldflags "-X github.com/vespa-engine/vespa/client/go/build.Version=$(VERSION)" +GIT_ROOT := $(shell git rev-parse --show-toplevel) +DIST_TARGETS := dist-mac dist-linux dist-win32 dist-win64 all: test checkfmt install +# +# Dist targets +# + +# Bump the version of the vespa-cli formula and create a pull request to the Homebrew repository. +# +# Example: +# +# $ git checkout vespa-X.Y.Z-1 +# $ make dist-homebrew +dist-homebrew: dist-version + brew bump-formula-pr --tag vespa-$(VERSION)-1 --version $(VERSION) vespa-cli + +# Create a GitHub release draft for all platforms. Note that this only creates a +# draft, which is not publicly visible until it's explicitly published. +# +# Once the release has been created this prints a URL to the release draft. 
+# +# This requires the GitHub CLI to be installed: brew install gh +# +# Example: +# +# $ git checkout vespa-X.Y.Z-1 +# $ make dist-github +dist-github: dist + gh release create v$(VERSION) --repo vespa-engine/vespa --notes-file $(CURDIR)/README.md --draft --title "Vespa CLI $(VERSION)" \ + $(DIST)/vespa-cli_$(VERSION)_sha256sums.txt \ + $(DIST)/vespa-cli_$(VERSION)_*.{zip,tar.gz} + +# +# Cross-platform build targets +# + +dist: $(DIST_TARGETS) dist-sha256sums + +dist-mac: GOOS=darwin +dist-mac: GOARCH=amd64 + +dist-linux: GOOS=linux +dist-linux: GOARCH=amd64 + +dist-win32: GOOS=windows +dist-win32: GOARCH=386 + +dist-win64: GOOS=windows +dist-win64: GOARCH=amd64 + +$(DIST_TARGETS): DIST_NAME=vespa-cli_$(VERSION)_$(GOOS)_$(GOARCH) +$(DIST_TARGETS): dist-version manpages +$(DIST_TARGETS): + mkdir -p $(DIST)/$(DIST_NAME)/bin + env GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) ./... + cp -a $(GIT_ROOT)/LICENSE $(DIST)/$(DIST_NAME) + if [ "$(GOOS)" = "windows" ]; then \ + cd $(DIST) && zip -r $(DIST)/$(DIST_NAME).zip $(DIST_NAME); \ + else \ + cp -a share $(DIST)/$(DIST_NAME); \ + tar -czvf $(DIST)/$(DIST_NAME).tar.gz -C $(DIST) $(DIST_NAME); \ + fi + +dist-sha256sums: + cd $(DIST) && sha256sum vespa-cli_$(VERSION)_*.{zip,tar.gz} > vespa-cli_$(VERSION)_sha256sums.txt + +dist-version: +ifeq ($(VERSION),$(DEVEL_VERSION)) + $(error Invalid release version: $(VERSION). Try 'git checkout vespa-X.Y.Z-1' or 'env VERSION=X.Y.Z make ...') +endif + +# +# Development targets +# + install: - env GOBIN=$(BIN) \ - go install -ldflags "-X github.com/vespa-engine/vespa/client/go/build.Version=$(VERSION)" ./... + env GOBIN=$(BIN) go install $(GO_FLAGS) ./... manpages: install mkdir -p $(SHARE)/man/man1 $(BIN)/vespa man $(SHARE)/man/man1 clean: - rm -rf $(BIN) $(SHARE) + rm -rf $(BIN) $(SHARE) $(DIST) test: go test ./... 
diff --git a/client/go/README.md b/client/go/README.md new file mode 100644 index 00000000000..7b5b222503c --- /dev/null +++ b/client/go/README.md @@ -0,0 +1,6 @@ +The command-line tool for Vespa.ai. + +Use it on Vespa instances running locally, remotely or in the cloud. +Prefer web service API's to this in production. + +Vespa documentation: https://docs.vespa.ai diff --git a/client/go/cmd/api_key.go b/client/go/cmd/api_key.go index c94faa0d5e3..90cbdbc5bc1 100644 --- a/client/go/cmd/api_key.go +++ b/client/go/cmd/api_key.go @@ -7,7 +7,6 @@ import ( "fmt" "io/ioutil" "log" - "path/filepath" "github.com/spf13/cobra" "github.com/vespa-engine/vespa/client/go/util" @@ -29,16 +28,17 @@ var apiKeyCmd = &cobra.Command{ DisableAutoGenTag: true, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - configDir := configDir("") - if configDir == "" { - return - } app, err := vespa.ApplicationFromString(getApplication()) if err != nil { fatalErr(err, "Could not parse application") return } - apiKeyFile := filepath.Join(configDir, app.Tenant+".api-key.pem") + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } + apiKeyFile := cfg.APIKeyPath(app.Tenant) if util.PathExists(apiKeyFile) && !overwriteKey { printErrHint(fmt.Errorf("File %s already exists", apiKeyFile), "Use -f to overwrite it") printPublicKey(apiKeyFile, app.Tenant) diff --git a/client/go/cmd/api_key_test.go b/client/go/cmd/api_key_test.go index 0e50fd6d669..c00f520aa25 100644 --- a/client/go/cmd/api_key_test.go +++ b/client/go/cmd/api_key_test.go @@ -11,13 +11,13 @@ import ( ) func TestAPIKey(t *testing.T) { - configDir := t.TempDir() - keyFile := configDir + "/.vespa/t1.api-key.pem" + homeDir := t.TempDir() + keyFile := homeDir + "/.vespa/t1.api-key.pem" - out := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, configDir: configDir}, t, nil) + out := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil) 
assert.True(t, strings.HasPrefix(out, "Success: API private key written to "+keyFile+"\n")) - out = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, configDir: configDir}, t, nil) + out = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil) assert.True(t, strings.HasPrefix(out, "Error: File "+keyFile+" already exists\nHint: Use -f to overwrite it\n")) assert.True(t, strings.Contains(out, "This is your public key")) } diff --git a/client/go/cmd/cert.go b/client/go/cmd/cert.go index e1e11b6f73e..078c0704f9d 100644 --- a/client/go/cmd/cert.go +++ b/client/go/cmd/cert.go @@ -28,20 +28,34 @@ var certCmd = &cobra.Command{ DisableAutoGenTag: true, Args: cobra.MaximumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - app := getApplication() + app, err := vespa.ApplicationFromString(getApplication()) + if err != nil { + fatalErr(err) + return + } pkg, err := vespa.ApplicationPackageFrom(applicationSource(args)) if err != nil { fatalErr(err) return } - configDir := configDir(app) - if configDir == "" { + cfg, err := LoadConfig() + if err != nil { + fatalErr(err) return } securityDir := filepath.Join(pkg.Path, "security") pkgCertificateFile := filepath.Join(securityDir, "clients.pem") - privateKeyFile := filepath.Join(configDir, "data-plane-private-key.pem") - certificateFile := filepath.Join(configDir, "data-plane-public-cert.pem") + privateKeyFile, err := cfg.PrivateKeyPath(app) + if err != nil { + fatalErr(err) + return + } + certificateFile, err := cfg.CertificatePath(app) + if err != nil { + fatalErr(err) + return + } + if !overwriteCertificate { for _, file := range []string{pkgCertificateFile, privateKeyFile, certificateFile} { if util.PathExists(file) { diff --git a/client/go/cmd/cert_test.go b/client/go/cmd/cert_test.go index e655f76b0f1..174b5fe5e9d 100644 --- a/client/go/cmd/cert_test.go +++ b/client/go/cmd/cert_test.go @@ -11,20 +11,23 @@ import ( "testing" "github.com/stretchr/testify/assert" + 
"github.com/vespa-engine/vespa/client/go/vespa" ) func TestCert(t *testing.T) { - tmpDir := t.TempDir() - mockApplicationPackage(t, tmpDir) - out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", tmpDir}, configDir: tmpDir}, t, nil) + homeDir := t.TempDir() + mockApplicationPackage(t, homeDir) + out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", homeDir}, homeDir: homeDir}, t, nil) - pkgCertificate := filepath.Join(tmpDir, "security", "clients.pem") - certificate := filepath.Join(tmpDir, ".vespa", "t1.a1.i1", "data-plane-public-cert.pem") - privateKey := filepath.Join(tmpDir, ".vespa", "t1.a1.i1", "data-plane-private-key.pem") + app, err := vespa.ApplicationFromString("t1.a1.i1") + assert.Nil(t, err) + pkgCertificate := filepath.Join(homeDir, "security", "clients.pem") + certificate := filepath.Join(homeDir, ".vespa", app.String(), "data-plane-public-cert.pem") + privateKey := filepath.Join(homeDir, ".vespa", app.String(), "data-plane-private-key.pem") assert.Equal(t, fmt.Sprintf("Success: Certificate written to %s\nSuccess: Certificate written to %s\nSuccess: Private key written to %s\n", pkgCertificate, certificate, privateKey), out) - out = execute(command{args: []string{"cert", "-a", "t1.a1.i1", tmpDir}, configDir: tmpDir}, t, nil) + out = execute(command{args: []string{"cert", "-a", "t1.a1.i1", homeDir}, homeDir: homeDir}, t, nil) assert.True(t, strings.HasPrefix(out, "Error: Certificate or private key")) } diff --git a/client/go/cmd/clone.go b/client/go/cmd/clone.go index ffd77030935..136872ecc8a 100644 --- a/client/go/cmd/clone.go +++ b/client/go/cmd/clone.go @@ -30,7 +30,7 @@ func init() { var cloneCmd = &cobra.Command{ // TODO: "application" and "list" subcommands? - Use: "clone <sample-application-path> <target-directory>", + Use: "clone sample-application-path target-directory", Short: "Create files and directory structure for a new Vespa application from a sample application", Long: `Creates an application package file structure. 
Example: "$ vespa clone vespa-cloud/album-recommendation my-app", diff --git a/client/go/cmd/command_tester.go b/client/go/cmd/command_tester.go index 4b084bd896c..095a1af7ac3 100644 --- a/client/go/cmd/command_tester.go +++ b/client/go/cmd/command_tester.go @@ -11,6 +11,7 @@ import ( "log" "net/http" "os" + "path/filepath" "strconv" "testing" "time" @@ -23,9 +24,9 @@ import ( ) type command struct { - configDir string - args []string - moreArgs []string + homeDir string + args []string + moreArgs []string } func execute(cmd command, t *testing.T, client *mockHttpClient) string { @@ -37,11 +38,11 @@ func execute(cmd command, t *testing.T, client *mockHttpClient) string { color = aurora.NewAurora(false) // Set config dir. Use a separate one per test if none is specified - if cmd.configDir == "" { - cmd.configDir = t.TempDir() + if cmd.homeDir == "" { + cmd.homeDir = t.TempDir() viper.Reset() } - os.Setenv("VESPA_CLI_HOME", cmd.configDir) + os.Setenv("VESPA_CLI_HOME", filepath.Join(cmd.homeDir, ".vespa")) // Reset flags to their default value - persistent flags in Cobra persists over tests rootCmd.Flags().VisitAll(func(f *pflag.Flag) { @@ -74,11 +75,8 @@ func executeCommand(t *testing.T, client *mockHttpClient, args []string, moreArg } type mockHttpClient struct { - // The HTTP status code that will be returned from the next invocation. Default: 200 - nextStatus int - - // The response body code that will be returned from the next invocation. Default: "" - nextBody string + // The responses to return for future requests. 
Once a response is consumed, it's removed from this array + nextResponses []mockResponse // A recording of the last HTTP request made through this lastRequest *http.Request @@ -87,19 +85,34 @@ type mockHttpClient struct { requests []*http.Request } -func (c *mockHttpClient) Do(request *http.Request, timeout time.Duration) (response *http.Response, error error) { - if c.nextStatus == 0 { - c.nextStatus = 200 +type mockResponse struct { + status int + body string +} + +func (c *mockHttpClient) NextStatus(status int) { c.NextResponse(status, "") } + +func (c *mockHttpClient) NextResponse(status int, body string) { + c.nextResponses = append(c.nextResponses, mockResponse{status: status, body: body}) +} + +func (c *mockHttpClient) Do(request *http.Request, timeout time.Duration) (*http.Response, error) { + response := mockResponse{status: 200} + if len(c.nextResponses) > 0 { + response = c.nextResponses[0] + c.nextResponses = c.nextResponses[1:] } c.lastRequest = request c.requests = append(c.requests, request) return &http.Response{ - Status: "Status " + strconv.Itoa(c.nextStatus), - StatusCode: c.nextStatus, - Body: ioutil.NopCloser(bytes.NewBufferString(c.nextBody)), + Status: "Status " + strconv.Itoa(response.status), + StatusCode: response.status, + Body: ioutil.NopCloser(bytes.NewBufferString(response.body)), Header: make(http.Header), }, nil } func (c *mockHttpClient) UseCertificate(certificate tls.Certificate) {} + +func convergeServices(client *mockHttpClient) { client.NextResponse(200, `{"converged":true}`) } diff --git a/client/go/cmd/config.go b/client/go/cmd/config.go index bb1662d0b07..13142c92553 100644 --- a/client/go/cmd/config.go +++ b/client/go/cmd/config.go @@ -6,6 +6,7 @@ package cmd import ( "fmt" + "io/ioutil" "log" "os" "path/filepath" @@ -43,90 +44,149 @@ var configCmd = &cobra.Command{ } var setConfigCmd = &cobra.Command{ - Use: "set <option> <value>", + Use: "set option-name value", Short: "Set a configuration option.", Example: "$ vespa 
config set target cloud", DisableAutoGenTag: true, Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - if err := setOption(args[0], args[1]); err != nil { - log.Print(err) + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } + if err := cfg.Set(args[0], args[1]); err != nil { + fatalErr(err) } else { - writeConfig() + if err := cfg.Write(); err != nil { + fatalErr(err) + } } }, } var getConfigCmd = &cobra.Command{ - Use: "get [<option>]", + Use: "get option-name", Short: "Get a configuration option", Example: "$ vespa config get target", Args: cobra.MaximumNArgs(1), DisableAutoGenTag: true, Run: func(cmd *cobra.Command, args []string) { + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } + if len(args) == 0 { // Print all values - printOption(targetFlag) - printOption(applicationFlag) + printOption(cfg, targetFlag) + printOption(cfg, applicationFlag) } else { - printOption(args[0]) + printOption(cfg, args[0]) } }, } -func printOption(option string) { - value, err := getOption(option) - if err != nil { - value = color.Faint("<unset>").String() - } else { - value = color.Cyan(value).String() - } - log.Printf("%s = %s", option, value) +type Config struct { + Home string + createDirs bool } -func configDir(application string) string { +func LoadConfig() (*Config, error) { home := os.Getenv("VESPA_CLI_HOME") if home == "" { var err error home, err = os.UserHomeDir() if err != nil { - fatalErr(err, "Could not determine configuration directory") - return "" + return nil, err } + home = filepath.Join(home, ".vespa") } - configDir := filepath.Join(home, ".vespa", application) - if err := os.MkdirAll(configDir, 0755); err != nil { - fatalErr(err, "Could not create config directory") - return "" + if err := os.MkdirAll(home, 0700); err != nil { + return nil, err } - return configDir + c := &Config{Home: home, createDirs: true} + if err := c.load(); err != nil { 
+ return nil, err + } + return c, nil } -func bindFlagToConfig(option string, command *cobra.Command) { - flagToConfigBindings[option] = command +func (c *Config) Write() error { + if err := os.MkdirAll(c.Home, 0700); err != nil { + return err + } + configFile := filepath.Join(c.Home, configName+"."+configType) + if !util.PathExists(configFile) { + if _, err := os.Create(configFile); err != nil { + return err + } + } + return viper.WriteConfig() +} + +func (c *Config) CertificatePath(app vespa.ApplicationID) (string, error) { + return c.applicationFilePath(app, "data-plane-public-cert.pem") +} + +func (c *Config) PrivateKeyPath(app vespa.ApplicationID) (string, error) { + return c.applicationFilePath(app, "data-plane-private-key.pem") +} + +func (c *Config) APIKeyPath(tenantName string) string { + return filepath.Join(c.Home, tenantName+".api-key.pem") +} + +func (c *Config) ReadAPIKey(tenantName string) ([]byte, error) { + return ioutil.ReadFile(c.APIKeyPath(tenantName)) } -func readConfig() { - configDir := configDir("") - if configDir == "" { - return +func (c *Config) ReadSessionID(app vespa.ApplicationID) (int64, error) { + sessionPath, err := c.applicationFilePath(app, "session_id") + if err != nil { + return 0, err + } + b, err := ioutil.ReadFile(sessionPath) + if err != nil { + return 0, err } + return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64) +} + +func (c *Config) WriteSessionID(app vespa.ApplicationID, sessionID int64) error { + sessionPath, err := c.applicationFilePath(app, "session_id") + if err != nil { + return err + } + return ioutil.WriteFile(sessionPath, []byte(fmt.Sprintf("%d\n", sessionID)), 0600) +} + +func (c *Config) applicationFilePath(app vespa.ApplicationID, name string) (string, error) { + appDir := filepath.Join(c.Home, app.String()) + if c.createDirs { + if err := os.MkdirAll(appDir, 0700); err != nil { + return "", err + } + } + return filepath.Join(appDir, name), nil +} + +func (c *Config) load() error { 
viper.SetConfigName(configName) viper.SetConfigType(configType) - viper.AddConfigPath(configDir) + viper.AddConfigPath(c.Home) viper.AutomaticEnv() for option, command := range flagToConfigBindings { viper.BindPFlag(option, command.PersistentFlags().Lookup(option)) } err := viper.ReadInConfig() if _, ok := err.(viper.ConfigFileNotFoundError); ok { - return // Fine - } - if err != nil { - fatalErr(err, "Could not read configuration") + return nil } + return err } -func getOption(option string) (string, error) { +func (c *Config) Get(option string) (string, error) { value := viper.GetString(option) if value == "" { return "", fmt.Errorf("no such option: %q", option) @@ -134,7 +194,7 @@ func getOption(option string) (string, error) { return value, nil } -func setOption(option, value string) error { +func (c *Config) Set(option, value string) error { switch option { case targetFlag: switch value { @@ -162,29 +222,16 @@ func setOption(option, value string) error { return fmt.Errorf("invalid option or value: %q: %q", option, value) } -func writeConfig() { - configDir := configDir("") - if configDir == "" { - return - } - - if !util.PathExists(configDir) { - if err := os.MkdirAll(configDir, 0700); err != nil { - fatalErr(err, "Could not create ", color.Cyan(configDir)) - return - } - } - - configFile := filepath.Join(configDir, configName+"."+configType) - if !util.PathExists(configFile) { - if _, err := os.Create(configFile); err != nil { - fatalErr(err, "Could not create ", color.Cyan(configFile)) - return - } +func printOption(cfg *Config, option string) { + value, err := cfg.Get(option) + if err != nil { + value = color.Faint("<unset>").String() + } else { + value = color.Cyan(value).String() } + log.Printf("%s = %s", option, value) +} - if err := viper.WriteConfig(); err != nil { - fatalErr(err, "Could not write config") - return - } +func bindFlagToConfig(option string, command *cobra.Command) { + flagToConfigBindings[option] = command } diff --git 
a/client/go/cmd/config_test.go b/client/go/cmd/config_test.go index dee63bcb58f..07d165d58e0 100644 --- a/client/go/cmd/config_test.go +++ b/client/go/cmd/config_test.go @@ -7,24 +7,24 @@ import ( ) func TestConfig(t *testing.T) { - configDir := t.TempDir() - assert.Equal(t, "invalid option or value: \"foo\": \"bar\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "foo", "bar"}}, t, nil)) - assert.Equal(t, "foo = <unset>\n", execute(command{configDir: configDir, args: []string{"config", "get", "foo"}}, t, nil)) - assert.Equal(t, "target = local\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil)) - assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "cloud"}}, t, nil)) - assert.Equal(t, "target = cloud\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil)) - assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "http://127.0.0.1:8080"}}, t, nil)) - assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "https://127.0.0.1"}}, t, nil)) - assert.Equal(t, "target = https://127.0.0.1\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil)) + homeDir := t.TempDir() + assert.Equal(t, "invalid option or value: \"foo\": \"bar\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "foo", "bar"}}, t, nil)) + assert.Equal(t, "foo = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "foo"}}, t, nil)) + assert.Equal(t, "target = local\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil)) + assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "cloud"}}, t, nil)) + assert.Equal(t, "target = cloud\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, 
nil)) + assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "http://127.0.0.1:8080"}}, t, nil)) + assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "https://127.0.0.1"}}, t, nil)) + assert.Equal(t, "target = https://127.0.0.1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil)) - assert.Equal(t, "invalid application: \"foo\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "application", "foo"}}, t, nil)) - assert.Equal(t, "application = <unset>\n", execute(command{configDir: configDir, args: []string{"config", "get", "application"}}, t, nil)) - assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, nil)) - assert.Equal(t, "application = t1.a1.i1\n", execute(command{configDir: configDir, args: []string{"config", "get", "application"}}, t, nil)) + assert.Equal(t, "invalid application: \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "foo"}}, t, nil)) + assert.Equal(t, "application = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil)) + assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, nil)) + assert.Equal(t, "application = t1.a1.i1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil)) - assert.Equal(t, "target = https://127.0.0.1\napplication = t1.a1.i1\n", execute(command{configDir: configDir, args: []string{"config", "get"}}, t, nil)) + assert.Equal(t, "target = https://127.0.0.1\napplication = t1.a1.i1\n", execute(command{homeDir: homeDir, args: []string{"config", "get"}}, t, nil)) - assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "wait", "60"}}, t, nil)) - assert.Equal(t, "wait option must be an integer 
>= 0, got \"foo\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "wait", "foo"}}, t, nil)) - assert.Equal(t, "wait = 60\n", execute(command{configDir: configDir, args: []string{"config", "get", "wait"}}, t, nil)) + assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "60"}}, t, nil)) + assert.Equal(t, "wait option must be an integer >= 0, got \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "foo"}}, t, nil)) + assert.Equal(t, "wait = 60\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "wait"}}, t, nil)) } diff --git a/client/go/cmd/curl.go b/client/go/cmd/curl.go new file mode 100644 index 00000000000..4d949b51e8f --- /dev/null +++ b/client/go/cmd/curl.go @@ -0,0 +1,143 @@ +package cmd + +import ( + "fmt" + "log" + "os" + "os/exec" + "strings" + + "github.com/kballard/go-shellquote" + "github.com/spf13/cobra" + "github.com/vespa-engine/vespa/client/go/vespa" +) + +var curlDryRun bool +var curlPath string + +func init() { + rootCmd.AddCommand(curlCmd) + curlCmd.Flags().StringVarP(&curlPath, "path", "p", "", "The path to curl. If this is unset, curl from PATH is used") + curlCmd.Flags().BoolVarP(&curlDryRun, "dry-run", "n", false, "Print the curl command that would be executed") +} + +var curlCmd = &cobra.Command{ + Use: "curl [curl-options] path", + Short: "Query Vespa using curl", + Long: `Query Vespa using curl. 
+ +Execute curl with the appropriate URL, certificate and private key for your application.`, + Example: `$ vespa curl /search/?yql=query +$ vespa curl -- -v --data-urlencode "yql=select * from sources * where title contains 'foo';" /search/ +$ vespa curl -t local -- -v /search/?yql=query +`, + DisableAutoGenTag: true, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } + app, err := vespa.ApplicationFromString(getApplication()) + if err != nil { + fatalErr(err) + return + } + privateKeyFile, err := cfg.PrivateKeyPath(app) + if err != nil { + fatalErr(err) + return + } + certificateFile, err := cfg.CertificatePath(app) + if err != nil { + fatalErr(err) + return + } + service := getService("query", 0) + c := &curl{privateKeyPath: privateKeyFile, certificatePath: certificateFile} + if curlDryRun { + cmd, err := c.command(service.BaseURL, args...) + if err != nil { + fatalErr(err, "Failed to create curl command") + return + } + log.Print(shellquote.Join(cmd.Args...)) + } else { + if err := c.run(service.BaseURL, args...); err != nil { + fatalErr(err, "Failed to run curl") + return + } + } + }, +} + +type curl struct { + path string + certificatePath string + privateKeyPath string +} + +func (c *curl) run(baseURL string, args ...string) error { + cmd, err := c.command(baseURL, args...) 
+ if err != nil { + return err + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return err + } + return cmd.Wait() +} + +func (c *curl) command(baseURL string, args ...string) (*exec.Cmd, error) { + if len(args) == 0 { + return nil, fmt.Errorf("need at least one argument") + } + + if c.path == "" { + resolvedPath, err := resolveCurlPath() + if err != nil { + return nil, err + } + c.path = resolvedPath + } + + path := args[len(args)-1] + args = args[:len(args)-1] + if !hasOption("--key", args) && c.privateKeyPath != "" { + args = append(args, "--key", c.privateKeyPath) + } + if !hasOption("--cert", args) && c.certificatePath != "" { + args = append(args, "--cert", c.certificatePath) + } + + baseURL = strings.TrimSuffix(baseURL, "/") + path = strings.TrimPrefix(path, "/") + args = append(args, baseURL+"/"+path) + + return exec.Command(c.path, args...), nil +} + +func hasOption(option string, args []string) bool { + for _, arg := range args { + if arg == option { + return true + } + } + return false +} + +func resolveCurlPath() (string, error) { + var curlPath string + var err error + curlPath, err = exec.LookPath("curl") + if err != nil { + curlPath, err = exec.LookPath("curl.exe") + if err != nil { + return "", err + } + } + return curlPath, nil +} diff --git a/client/go/cmd/curl_test.go b/client/go/cmd/curl_test.go new file mode 100644 index 00000000000..c3163e731ce --- /dev/null +++ b/client/go/cmd/curl_test.go @@ -0,0 +1,53 @@ +package cmd + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCurl(t *testing.T) { + homeDir := t.TempDir() + httpClient := &mockHttpClient{} + convergeServices(httpClient) + out := execute(command{homeDir: homeDir, args: []string{"curl", "-n", "-p", "/usr/bin/curl", "-a", "t1.a1.i1", "--", "-v", "--data-urlencode", "arg=with space", "/search"}}, t, httpClient) + + expected := fmt.Sprintf("/usr/bin/curl -v --data-urlencode 
'arg=with space' --key %s --cert %s https://127.0.0.1:8080/search\n", + filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-private-key.pem"), + filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-public-cert.pem")) + assert.Equal(t, expected, out) +} + +func TestCurlCommand(t *testing.T) { + c := &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"} + assertCurl(t, c, "/usr/bin/curl -v --key /tmp/priv-key --cert /tmp/cert-key https://example.com/", "-v", "/") + + c = &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"} + assertCurl(t, c, "/usr/bin/curl -v --cert my-cert --key my-key https://example.com/", "-v", "--cert", "my-cert", "--key", "my-key", "/") + + c = &curl{path: "/usr/bin/curl2"} + assertCurl(t, c, "/usr/bin/curl2 -v https://example.com/foo", "-v", "/foo") + + c = &curl{path: "/usr/bin/curl"} + assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "/foo/bar") + + c = &curl{path: "/usr/bin/curl"} + assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "foo/bar") + + c = &curl{path: "/usr/bin/curl"} + assertCurlURL(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "https://example.com/", "-v", "foo/bar") +} + +func assertCurl(t *testing.T, c *curl, expectedOutput string, args ...string) { + assertCurlURL(t, c, expectedOutput, "https://example.com", args...) +} + +func assertCurlURL(t *testing.T, c *curl, expectedOutput string, url string, args ...string) { + cmd, err := c.command("https://example.com", args...) 
+ assert.Nil(t, err) + + assert.Equal(t, expectedOutput, strings.Join(cmd.Args, " ")) +} diff --git a/client/go/cmd/deploy.go b/client/go/cmd/deploy.go index 19fa08ebaa4..866759b18c5 100644 --- a/client/go/cmd/deploy.go +++ b/client/go/cmd/deploy.go @@ -6,12 +6,7 @@ package cmd import ( "fmt" - "io/ioutil" "log" - "os" - "path/filepath" - "strconv" - "strings" "github.com/spf13/cobra" "github.com/vespa-engine/vespa/client/go/vespa" @@ -33,7 +28,7 @@ func init() { } var deployCmd = &cobra.Command{ - Use: "deploy [<application-directory>]", + Use: "deploy [application-directory]", Short: "Deploy (prepare and activate) an application package", Long: `Deploy (prepare and activate) an application package. @@ -51,6 +46,11 @@ If application directory is not specified, it defaults to working directory.`, fatalErr(nil, err.Error()) return } + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } target := getTarget() opts := vespa.DeploymentOpts{ApplicationPackage: pkg, Target: target} if opts.IsCloud() { @@ -58,7 +58,11 @@ If application directory is not specified, it defaults to working directory.`, if !opts.ApplicationPackage.HasCertificate() { fatalErrHint(fmt.Errorf("Missing certificate in application package"), "Applications in Vespa Cloud require a certificate", "Try 'vespa cert'") } - opts.APIKey = readAPIKey(deployment.Application.Tenant) + opts.APIKey, err = cfg.ReadAPIKey(deployment.Application.Tenant) + if err != nil { + fatalErrHint(err, "Deployment to cloud requires an API key. 
Try 'vespa api-key'") + return + } opts.Deployment = deployment } if sessionOrRunID, err := vespa.Deploy(opts); err == nil { @@ -83,7 +87,7 @@ If application directory is not specified, it defaults to working directory.`, } var prepareCmd = &cobra.Command{ - Use: "prepare <application-directory>", + Use: "prepare application-directory", Short: "Prepare an application package for activation", Args: cobra.MaximumNArgs(1), DisableAutoGenTag: true, @@ -93,8 +97,9 @@ var prepareCmd = &cobra.Command{ fatalErr(err, "Could not find application package") return } - configDir := configDir("default") - if configDir == "" { + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") return } target := getTarget() @@ -103,7 +108,10 @@ var prepareCmd = &cobra.Command{ Target: target, }) if err == nil { - writeSessionID(configDir, sessionID) + if err := cfg.WriteSessionID(vespa.DefaultApplication, sessionID); err != nil { + fatalErr(err, "Could not write session ID") + return + } printSuccess("Prepared ", color.Cyan(pkg.Path), " with session ", sessionID) } else { fatalErr(nil, err.Error()) @@ -122,8 +130,16 @@ var activateCmd = &cobra.Command{ fatalErr(err, "Could not find application package") return } - configDir := configDir("default") - sessionID := readSessionID(configDir) + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return + } + sessionID, err := cfg.ReadSessionID(vespa.DefaultApplication) + if err != nil { + fatalErr(err, "Could not read session ID") + return + } target := getTarget() err = vespa.Activate(sessionID, vespa.DeploymentOpts{ ApplicationPackage: pkg, @@ -144,26 +160,3 @@ func waitForQueryService(sessionOrRunID int64) { waitForService("query", sessionOrRunID) } } - -func writeSessionID(appConfigDir string, sessionID int64) { - if err := os.MkdirAll(appConfigDir, 0755); err != nil { - fatalErr(err, "Could not create directory for session ID") - } - if err := 
ioutil.WriteFile(sessionIDFile(appConfigDir), []byte(fmt.Sprintf("%d\n", sessionID)), 0600); err != nil { - fatalErr(err, "Could not write session ID") - } -} - -func readSessionID(appConfigDir string) int64 { - b, err := ioutil.ReadFile(sessionIDFile(appConfigDir)) - if err != nil { - fatalErr(err, "Could not read session ID") - } - id, err := strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64) - if err != nil { - fatalErr(err, "Invalid session ID") - } - return id -} - -func sessionIDFile(appConfigDir string) string { return filepath.Join(appConfigDir, "session_id") } diff --git a/client/go/cmd/deploy_test.go b/client/go/cmd/deploy_test.go index 0e53feddbaf..f24ba0829f9 100644 --- a/client/go/cmd/deploy_test.go +++ b/client/go/cmd/deploy_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/vespa-engine/vespa/client/go/vespa" ) func TestPrepareZip(t *testing.T) { @@ -111,7 +112,7 @@ func assertDeploy(applicationPackage string, arguments []string, t *testing.T) { func assertPrepare(applicationPackage string, arguments []string, t *testing.T) { client := &mockHttpClient{} - client.nextBody = `{"session-id":"42"}` + client.NextResponse(200, `{"session-id":"42"}`) assert.Equal(t, "Success: Prepared "+applicationPackage+" with session 42\n", executeCommand(t, client, arguments, []string{})) @@ -124,12 +125,14 @@ func assertPrepare(applicationPackage string, arguments []string, t *testing.T) func assertActivate(applicationPackage string, arguments []string, t *testing.T) { client := &mockHttpClient{} - configDir := t.TempDir() - appConfigDir := filepath.Join(configDir, ".vespa", "default") - writeSessionID(appConfigDir, 42) + homeDir := t.TempDir() + cfg := Config{Home: filepath.Join(homeDir, ".vespa"), createDirs: true} + if err := cfg.WriteSessionID(vespa.DefaultApplication, 42); err != nil { + t.Fatal(err) + } assert.Equal(t, "Success: Activated "+applicationPackage+" with session 42\n", - execute(command{args: arguments, 
configDir: configDir}, t, client)) + execute(command{args: arguments, homeDir: homeDir}, t, client)) url := "http://127.0.0.1:19071/application/v2/tenant/default/session/42/active" assert.Equal(t, url, client.lastRequest.URL.String()) assert.Equal(t, "PUT", client.lastRequest.Method) @@ -155,14 +158,16 @@ func assertDeployRequestMade(target string, client *mockHttpClient, t *testing.T } func assertApplicationPackageError(t *testing.T, command string, status int, expectedMessage string, returnBody string) { - client := &mockHttpClient{nextStatus: status, nextBody: returnBody} + client := &mockHttpClient{} + client.NextResponse(status, returnBody) assert.Equal(t, "Error: Invalid application package (Status "+strconv.Itoa(status)+")\n\n"+expectedMessage+"\n", executeCommand(t, client, []string{command, "testdata/applications/withTarget/target/application.zip"}, []string{})) } func assertDeployServerError(t *testing.T, status int, errorMessage string) { - client := &mockHttpClient{nextStatus: status, nextBody: errorMessage} + client := &mockHttpClient{} + client.NextResponse(status, errorMessage) assert.Equal(t, "Error: Error from deploy service at 127.0.0.1:19071 (Status "+strconv.Itoa(status)+"):\n"+errorMessage+"\n", executeCommand(t, client, []string{"deploy", "testdata/applications/withTarget/target/application.zip"}, []string{})) diff --git a/client/go/cmd/document.go b/client/go/cmd/document.go index 450f061f140..d2552729aeb 100644 --- a/client/go/cmd/document.go +++ b/client/go/cmd/document.go @@ -22,7 +22,7 @@ func init() { } var documentCmd = &cobra.Command{ - Use: "document <json-file>", + Use: "document json-file", Short: "Issue a document operation to Vespa", Long: `Issue a document operation to Vespa. @@ -43,7 +43,7 @@ should be used instead of this.`, } var documentPutCmd = &cobra.Command{ - Use: "put [<id>] <json-file>", + Use: "put [id] json-file", Short: "Writes a document to Vespa", Long: `Writes the document in the given file to Vespa. 
If the document already exists, all its values will be replaced by this document. @@ -62,7 +62,7 @@ $ vespa document put id:mynamespace:music::a-head-full-of-dreams src/test/resour } var documentUpdateCmd = &cobra.Command{ - Use: "update [<id>] <json-file>", + Use: "update [id] json-file", Short: "Modifies some fields of an existing document", Long: `Updates the values of the fields given in a json file as specified in the file. If the document id is specified both as an argument and in the file the argument takes precedence.`, @@ -80,7 +80,7 @@ $ vespa document update id:mynamespace:music::a-head-full-of-dreams src/test/res } var documentRemoveCmd = &cobra.Command{ - Use: "remove <id or json.file>", + Use: "remove id | json-file", Short: "Removes a document from Vespa", Long: `Removes the document specified either as a document id or given in the json file. If the document id is specified both as an argument and in the file the argument takes precedence.`, @@ -98,7 +98,7 @@ $ vespa document remove id:mynamespace:music::a-head-full-of-dreams`, } var documentGetCmd = &cobra.Command{ - Use: "get <id>", + Use: "get id", Short: "Gets a document", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, diff --git a/client/go/cmd/document_test.go b/client/go/cmd/document_test.go index 59af042e611..c298d5ef285 100644 --- a/client/go/cmd/document_test.go +++ b/client/go/cmd/document_test.go @@ -62,6 +62,7 @@ func TestDocumentRemoveWithoutIdArg(t *testing.T) { func TestDocumentSendMissingId(t *testing.T) { arguments := []string{"document", "put", "testdata/A-Head-Full-of-Dreams-Without-Operation.json"} client := &mockHttpClient{} + convergeServices(client) assert.Equal(t, "Error: No document id given neither as argument or as a 'put' key in the json file\n", executeCommand(t, client, arguments, []string{})) @@ -70,6 +71,7 @@ func TestDocumentSendMissingId(t *testing.T) { func TestDocumentSendWithDisagreeingOperations(t *testing.T) { arguments := []string{"document", "update", 
"testdata/A-Head-Full-of-Dreams-Put.json"} client := &mockHttpClient{} + convergeServices(client) assert.Equal(t, "Error: Wanted document operation is update but the JSON file specifies put\n", executeCommand(t, client, arguments, []string{})) @@ -90,12 +92,12 @@ func TestDocumentGet(t *testing.T) { func assertDocumentSend(arguments []string, expectedOperation string, expectedMethod string, expectedDocumentId string, expectedPayloadFile string, t *testing.T) { client := &mockHttpClient{} + documentURL := documentServiceURL(client) assert.Equal(t, "Success: "+expectedOperation+" "+expectedDocumentId+"\n", executeCommand(t, client, arguments, []string{})) - target := getService("document", 0).BaseURL expectedPath, _ := vespa.IdToURLPath(expectedDocumentId) - assert.Equal(t, target+"/document/v1/"+expectedPath, client.lastRequest.URL.String()) + assert.Equal(t, documentURL+"/document/v1/"+expectedPath, client.lastRequest.URL.String()) assert.Equal(t, "application/json", client.lastRequest.Header.Get("Content-Type")) assert.Equal(t, expectedMethod, client.lastRequest.Method) @@ -104,9 +106,9 @@ func assertDocumentSend(arguments []string, expectedOperation string, expectedMe } func assertDocumentGet(arguments []string, documentId string, t *testing.T) { - client := &mockHttpClient{ - nextBody: "{\"fields\":{\"foo\":\"bar\"}}", - } + client := &mockHttpClient{} + documentURL := documentServiceURL(client) + client.NextResponse(200, "{\"fields\":{\"foo\":\"bar\"}}") assert.Equal(t, `{ "fields": { @@ -115,14 +117,15 @@ func assertDocumentGet(arguments []string, documentId string, t *testing.T) { } `, executeCommand(t, client, arguments, []string{})) - target := getService("document", 0).BaseURL expectedPath, _ := vespa.IdToURLPath(documentId) - assert.Equal(t, target+"/document/v1/"+expectedPath, client.lastRequest.URL.String()) + assert.Equal(t, documentURL+"/document/v1/"+expectedPath, client.lastRequest.URL.String()) assert.Equal(t, "GET", client.lastRequest.Method) } 
func assertDocumentError(t *testing.T, status int, errorMessage string) { - client := &mockHttpClient{nextStatus: status, nextBody: errorMessage} + client := &mockHttpClient{} + convergeServices(client) + client.NextResponse(status, errorMessage) assert.Equal(t, "Error: Invalid document operation: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n", executeCommand(t, client, []string{"document", "put", @@ -131,10 +134,17 @@ func assertDocumentError(t *testing.T, status int, errorMessage string) { } func assertDocumentServerError(t *testing.T, status int, errorMessage string) { - client := &mockHttpClient{nextStatus: status, nextBody: errorMessage} + client := &mockHttpClient{} + convergeServices(client) + client.NextResponse(status, errorMessage) assert.Equal(t, "Error: Container (document API) at 127.0.0.1:8080: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n", executeCommand(t, client, []string{"document", "put", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put.json"}, []string{})) } + +func documentServiceURL(client *mockHttpClient) string { + convergeServices(client) + return getService("document", 0).BaseURL +} diff --git a/client/go/cmd/helpers.go b/client/go/cmd/helpers.go index b672419cae6..14699abf40e 100644 --- a/client/go/cmd/helpers.go +++ b/client/go/cmd/helpers.go @@ -10,7 +10,6 @@ import ( "io/ioutil" "log" "os" - "path/filepath" "strings" "time" @@ -51,16 +50,6 @@ func printSuccess(msg ...interface{}) { log.Print(color.Green("Success: "), fmt.Sprint(msg...)) } -func readAPIKey(tenant string) []byte { - configDir := configDir("") - apiKeyPath := filepath.Join(configDir, tenant+".api-key.pem") - key, err := ioutil.ReadFile(apiKeyPath) - if err != nil { - fatalErrHint(err, "Deployment to cloud requires an API key. 
Try 'vespa api-key'") - } - return key -} - func deploymentFromArgs() vespa.Deployment { zone, err := vespa.ZoneFromString(zoneArg) if err != nil { @@ -81,7 +70,12 @@ func applicationSource(args []string) string { } func getApplication() string { - app, err := getOption(applicationFlag) + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return "" + } + app, err := cfg.Get(applicationFlag) if err != nil { fatalErr(err, "A valid application must be specified") } @@ -89,7 +83,12 @@ func getApplication() string { } func getTargetType() string { - target, err := getOption(targetFlag) + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return "" + } + target, err := cfg.Get(targetFlag) if err != nil { fatalErr(err, "A valid target must be specified") } @@ -122,10 +121,25 @@ func getTarget() vespa.Target { return vespa.LocalTarget() case "cloud": deployment := deploymentFromArgs() - apiKey := readAPIKey(deployment.Application.Tenant) - configDir := configDir(deployment.Application.String()) - privateKeyFile := filepath.Join(configDir, "data-plane-private-key.pem") - certificateFile := filepath.Join(configDir, "data-plane-public-cert.pem") + cfg, err := LoadConfig() + if err != nil { + fatalErr(err, "Could not load config") + return nil + } + apiKey, err := ioutil.ReadFile(cfg.APIKeyPath(deployment.Application.Tenant)) + if err != nil { + fatalErrHint(err, "Deployment to cloud requires an API key. 
Try 'vespa api-key'") + } + privateKeyFile, err := cfg.PrivateKeyPath(deployment.Application) + if err != nil { + fatalErr(err) + return nil + } + certificateFile, err := cfg.CertificatePath(deployment.Application) + if err != nil { + fatalErr(err) + return nil + } kp, err := tls.LoadX509KeyPair(certificateFile, privateKeyFile) if err != nil { fatalErr(err, "Could not read key pair") diff --git a/client/go/cmd/man.go b/client/go/cmd/man.go index 0bd80f3d985..ff7f6fb1b6a 100644 --- a/client/go/cmd/man.go +++ b/client/go/cmd/man.go @@ -10,7 +10,7 @@ func init() { } var manCmd = &cobra.Command{ - Use: "man <directory>", + Use: "man directory", Short: "Generate man pages and write them to given directory", Args: cobra.ExactArgs(1), Hidden: true, // Not intended to be called by users diff --git a/client/go/cmd/query.go b/client/go/cmd/query.go index ea80c037721..f05914eb9a7 100644 --- a/client/go/cmd/query.go +++ b/client/go/cmd/query.go @@ -20,7 +20,7 @@ func init() { } var queryCmd = &cobra.Command{ - Use: "query <query-parameters>", + Use: "query query-parameters", Short: "Issue a query to Vespa", Example: `$ vespa query "yql=select * from sources * where title contains 'foo';" hits=5`, Long: `Issue a query to Vespa. 
diff --git a/client/go/cmd/query_test.go b/client/go/cmd/query_test.go index af9f9c4cfd5..bd9ae91f24d 100644 --- a/client/go/cmd/query_test.go +++ b/client/go/cmd/query_test.go @@ -44,25 +44,20 @@ func TestServerError(t *testing.T) { } func assertQuery(t *testing.T, expectedQuery string, query ...string) { - client := &mockHttpClient{nextBody: "{\"query\":\"result\"}"} + client := &mockHttpClient{} + queryURL := queryServiceURL(client) + client.NextResponse(200, "{\"query\":\"result\"}") assert.Equal(t, "{\n \"query\": \"result\"\n}\n", executeCommand(t, client, []string{"query"}, query), "query output") - assert.Equal(t, getService("query", 0).BaseURL+"/search/"+expectedQuery, client.lastRequest.URL.String()) -} - -func assertQueryNonJsonResult(t *testing.T, expectedQuery string, query ...string) { - client := &mockHttpClient{nextBody: "query result"} - assert.Equal(t, - "query result\n", - executeCommand(t, client, []string{"query"}, query), - "query output") - assert.Equal(t, getService("query", 0).BaseURL+"/search/"+expectedQuery, client.lastRequest.URL.String()) + assert.Equal(t, queryURL+"/search/"+expectedQuery, client.lastRequest.URL.String()) } func assertQueryError(t *testing.T, status int, errorMessage string) { - client := &mockHttpClient{nextStatus: status, nextBody: errorMessage} + client := &mockHttpClient{} + convergeServices(client) + client.NextResponse(status, errorMessage) assert.Equal(t, "Error: Invalid query: Status "+strconv.Itoa(status)+"\n"+errorMessage+"\n", executeCommand(t, client, []string{"query"}, []string{"yql=select from sources * where title contains 'foo'"}), @@ -70,9 +65,16 @@ func assertQueryError(t *testing.T, status int, errorMessage string) { } func assertQueryServiceError(t *testing.T, status int, errorMessage string) { - client := &mockHttpClient{nextStatus: status, nextBody: errorMessage} + client := &mockHttpClient{} + convergeServices(client) + client.NextResponse(status, errorMessage) assert.Equal(t, "Error: Status 
"+strconv.Itoa(status)+" from container at 127.0.0.1:8080\n"+errorMessage+"\n", executeCommand(t, client, []string{"query"}, []string{"yql=select from sources * where title contains 'foo'"}), "error output") } + +func queryServiceURL(client *mockHttpClient) string { + convergeServices(client) + return getService("query", 0).BaseURL +} diff --git a/client/go/cmd/root.go b/client/go/cmd/root.go index fde7d6edb5a..d218d3639b1 100644 --- a/client/go/cmd/root.go +++ b/client/go/cmd/root.go @@ -18,7 +18,7 @@ var ( // TODO: add timeout flag // TODO: add flag to show http request made rootCmd = &cobra.Command{ - Use: "vespa <command>", + Use: "vespa command-name", Short: "The command-line tool for Vespa.ai", Long: `The command-line tool for Vespa.ai. @@ -49,7 +49,6 @@ func configureLogger() { func init() { configureLogger() - cobra.OnInitialize(readConfig) rootCmd.PersistentFlags().StringVarP(&targetArg, targetFlag, "t", "local", "The name or URL of the recipient of this command") rootCmd.PersistentFlags().StringVarP(&applicationArg, applicationFlag, "a", "", "The application to manage") rootCmd.PersistentFlags().IntVarP(&waitSecsArg, waitFlag, "w", 0, "Number of seconds to wait for a service to become ready") diff --git a/client/go/cmd/status_test.go b/client/go/cmd/status_test.go index 488f979008c..8ddca71a35b 100644 --- a/client/go/cmd/status_test.go +++ b/client/go/cmd/status_test.go @@ -15,7 +15,7 @@ func TestStatusDeployCommand(t *testing.T) { } func TestStatusDeployCommandWithURLTarget(t *testing.T) { - assertDeployStatus("http://mydeploytarget", []string{"-t", "http://mydeploytarget"}, t) + assertDeployStatus("http://mydeploytarget:19071", []string{"-t", "http://mydeploytarget"}, t) } func TestStatusDeployCommandWithLocalTarget(t *testing.T) { @@ -27,7 +27,7 @@ func TestStatusQueryCommand(t *testing.T) { } func TestStatusQueryCommandWithUrlTarget(t *testing.T) { - assertQueryStatus("http://mycontainertarget", []string{"-t", "http://mycontainertarget"}, t) + 
assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget"}, t) } func TestStatusQueryCommandWithLocalTarget(t *testing.T) { @@ -44,6 +44,7 @@ func TestStatusErrorResponse(t *testing.T) { func assertDeployStatus(target string, args []string, t *testing.T) { client := &mockHttpClient{} + convergeServices(client) assert.Equal(t, "Deploy API at "+target+" is ready\n", executeCommand(t, client, []string{"status", "deploy"}, args), @@ -53,12 +54,14 @@ func assertDeployStatus(target string, args []string, t *testing.T) { func assertQueryStatus(target string, args []string, t *testing.T) { client := &mockHttpClient{} + convergeServices(client) assert.Equal(t, "Container (query API) at "+target+" is ready\n", executeCommand(t, client, []string{"status", "query"}, args), "vespa status container") assert.Equal(t, target+"/ApplicationStatus", client.lastRequest.URL.String()) + convergeServices(client) assert.Equal(t, "Container (query API) at "+target+" is ready\n", executeCommand(t, client, []string{"status"}, args), @@ -68,6 +71,7 @@ func assertQueryStatus(target string, args []string, t *testing.T) { func assertDocumentStatus(target string, args []string, t *testing.T) { client := &mockHttpClient{} + convergeServices(client) assert.Equal(t, "Container (document API) at "+target+" is ready\n", executeCommand(t, client, []string{"status", "document"}, args), @@ -76,7 +80,9 @@ func assertDocumentStatus(target string, args []string, t *testing.T) { } func assertQueryStatusError(target string, args []string, t *testing.T) { - client := &mockHttpClient{nextStatus: 500} + client := &mockHttpClient{} + convergeServices(client) + client.NextStatus(500) assert.Equal(t, "Container (query API) at "+target+" is not ready\nStatus 500\n", executeCommand(t, client, []string{"status", "container"}, args), diff --git a/client/go/cmd/version.go b/client/go/cmd/version.go index 4a5b6ec71b3..05820f4e34b 100644 --- a/client/go/cmd/version.go +++ 
b/client/go/cmd/version.go @@ -2,6 +2,7 @@ package cmd import ( "log" + "runtime" "github.com/spf13/cobra" "github.com/vespa-engine/vespa/client/go/build" @@ -17,6 +18,6 @@ var versionCmd = &cobra.Command{ DisableAutoGenTag: true, Args: cobra.ExactArgs(0), Run: func(cmd *cobra.Command, args []string) { - log.Print("vespa version ", build.Version) + log.Printf("vespa version %s compiled with %v on %v/%v", build.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH) }, } diff --git a/client/go/cmd/version_test.go b/client/go/cmd/version_test.go index 02303a08e21..fc977c47938 100644 --- a/client/go/cmd/version_test.go +++ b/client/go/cmd/version_test.go @@ -7,5 +7,5 @@ import ( ) func TestVersion(t *testing.T) { - assert.Equal(t, "vespa version 0.0.0-devel\n", execute(command{args: []string{"version"}}, t, nil)) + assert.Contains(t, execute(command{args: []string{"version"}}, t, nil), "vespa version 0.0.0-devel compiled with") } diff --git a/client/go/go.mod b/client/go/go.mod index 893add7218b..509eb273c6c 100644 --- a/client/go/go.mod +++ b/client/go/go.mod @@ -3,6 +3,7 @@ module github.com/vespa-engine/vespa/client/go go 1.15 require ( + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/logrusorgru/aurora v2.0.3+incompatible github.com/mattn/go-colorable v0.0.9 github.com/mattn/go-isatty v0.0.3 diff --git a/client/go/go.sum b/client/go/go.sum index 826f137d5e2..97328690ee5 100644 --- a/client/go/go.sum +++ b/client/go/go.sum @@ -170,6 +170,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= 
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= diff --git a/client/go/vespa/deploy.go b/client/go/vespa/deploy.go index 081e9fc17d2..22ab5380c23 100644 --- a/client/go/vespa/deploy.go +++ b/client/go/vespa/deploy.go @@ -23,6 +23,8 @@ import ( "github.com/vespa-engine/vespa/client/go/util" ) +var DefaultApplication = ApplicationID{Tenant: "default", Application: "application", Instance: "default"} + type ApplicationID struct { Tenant string Application string diff --git a/client/go/vespa/target.go b/client/go/vespa/target.go index faf80736293..ada2b2151b2 100644 --- a/client/go/vespa/target.go +++ b/client/go/vespa/target.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" "time" "github.com/vespa-engine/vespa/client/go/util" @@ -29,7 +30,6 @@ const ( type Service struct { BaseURL string Name string - description string certificate tls.Certificate } @@ -41,9 +41,8 @@ type Target interface { // Service returns the service for given name. Service(name string) (*Service, error) - // DiscoverServices queries for services available on this target after the given session or deployment run has - // completed. - DiscoverServices(timeout time.Duration, sessionOrRunID int64) error + // DiscoverServices queries for services available on this target after the deployment run has completed. + DiscoverServices(timeout time.Duration, runID int64) error } type customTarget struct { @@ -51,8 +50,6 @@ type customTarget struct { baseURL string } -type localTarget struct{ targetType string } - // Do sends request to this service. Any required authentication happens automatically. 
func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Response, error) { if s.certificate.Certificate != nil { @@ -77,7 +74,7 @@ func (s *Service) Wait(timeout time.Duration) (int, error) { return 0, err } okFunc := func(status int, response []byte) (bool, error) { return status/100 == 2, nil } - return wait(okFunc, req, s.certificate, timeout) + return wait(okFunc, req, &s.certificate, timeout) } func (s *Service) Description() string { @@ -97,27 +94,65 @@ func (t *customTarget) Type() string { return t.targetType } func (t *customTarget) Service(name string) (*Service, error) { switch name { case deployService, queryService, documentService: - // TODO: Add default ports if missing - return &Service{BaseURL: t.baseURL, Name: name}, nil + url, err := t.urlWithPort(name) + if err != nil { + return nil, err + } + return &Service{BaseURL: url, Name: name}, nil } return nil, fmt.Errorf("unknown service: %s", name) } -func (t *customTarget) DiscoverServices(timeout time.Duration, sessionID int64) error { return nil } - -func (t *localTarget) Type() string { return t.targetType } - -func (t *localTarget) Service(name string) (*Service, error) { - switch name { - case deployService: - return &Service{Name: name, BaseURL: "http://127.0.0.1:19071"}, nil - case queryService, documentService: - return &Service{Name: name, BaseURL: "http://127.0.0.1:8080"}, nil +func (t *customTarget) urlWithPort(serviceName string) (string, error) { + u, err := url.Parse(t.baseURL) + if err != nil { + return "", err } - return nil, fmt.Errorf("unknown service: %s", name) + port := u.Port() + if port == "" { + switch serviceName { + case deployService: + port = "19071" + case queryService, documentService: + port = "8080" + default: + return "", fmt.Errorf("unknown service: %s", serviceName) + } + u.Host = u.Host + ":" + port + } + return u.String(), nil } -func (t *localTarget) DiscoverServices(timeout time.Duration, sessionID int64) error { return nil } +func (t 
*customTarget) DiscoverServices(timeout time.Duration, runID int64) error { + deployService, err := t.Service("deploy") + if err != nil { + return err + } + url := fmt.Sprintf("%s/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge", deployService.BaseURL) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + converged := false + convergedFunc := func(status int, response []byte) (bool, error) { + if status/100 != 2 { + return false, nil + } + var resp serviceConvergeResponse + if err := json.Unmarshal(response, &resp); err != nil { + return false, nil + } + converged = resp.Converged + return converged, nil + } + if _, err := wait(convergedFunc, req, nil, timeout); err != nil { + return err + } + if !converged { + return fmt.Errorf("services have not converged") + } + return nil +} type cloudTarget struct { cloudAPI string @@ -190,7 +225,7 @@ func (t *cloudTarget) waitForRun(signer *RequestSigner, runID int64, timeout tim } return true, nil } - _, err = wait(jobSuccessFunc, req, t.keyPair, timeout) + _, err = wait(jobSuccessFunc, req, &t.keyPair, timeout) return err } @@ -221,7 +256,7 @@ func (t *cloudTarget) discoverEndpoints(signer *RequestSigner, timeout time.Dura endpointURL = resp.Endpoints[0].URL return true, nil } - if _, err = wait(endpointFunc, req, t.keyPair, timeout); err != nil { + if _, err = wait(endpointFunc, req, &t.keyPair, timeout); err != nil { return err } if endpointURL == "" { @@ -233,7 +268,9 @@ func (t *cloudTarget) discoverEndpoints(signer *RequestSigner, timeout time.Dura } // LocalTarget creates a target for a Vespa platform running locally. -func LocalTarget() Target { return &localTarget{targetType: localTargetType} } +func LocalTarget() Target { + return &customTarget{targetType: localTargetType, baseURL: "http://127.0.0.1"} +} // CustomTarget creates a Target for a Vespa platform running at baseURL. 
func CustomTarget(baseURL string) Target { @@ -264,11 +301,15 @@ type jobResponse struct { Status string `json:"status"` } +type serviceConvergeResponse struct { + Converged bool `json:"converged"` +} + type responseFunc func(status int, response []byte) (bool, error) -func wait(fn responseFunc, req *http.Request, certificate tls.Certificate, timeout time.Duration) (int, error) { - if certificate.Certificate != nil { - util.ActiveHttpClient.UseCertificate(certificate) +func wait(fn responseFunc, req *http.Request, certificate *tls.Certificate, timeout time.Duration) (int, error) { + if certificate != nil { + util.ActiveHttpClient.UseCertificate(*certificate) } var ( httpErr error diff --git a/client/go/vespa/target_test.go b/client/go/vespa/target_test.go index 1f46cc83178..213b2d93cf8 100644 --- a/client/go/vespa/target_test.go +++ b/client/go/vespa/target_test.go @@ -11,26 +11,29 @@ import ( ) type mockVespaApi struct { - endpointsReady bool - serverURL string + deploymentConverged bool + serverURL string } func (v *mockVespaApi) mockVespaHandler(w http.ResponseWriter, req *http.Request) { switch req.URL.Path { case "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1": response := "{}" - if v.endpointsReady { + if v.deploymentConverged { response = fmt.Sprintf(`{"endpoints": [{"url": "%s"}]}`, v.serverURL) } w.Write([]byte(response)) case "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42": - response := "{}" - if v.endpointsReady { + var response string + if v.deploymentConverged { response = `{"active": false, "status": "success"}` } else { response = `{"active": true, "status": "running"}` } w.Write([]byte(response)) + case "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge": + response := fmt.Sprintf(`{"converged": %t}`, v.deploymentConverged) + w.Write([]byte(response)) case "/status.html": w.Write([]byte("OK")) case 
"/ApplicationStatus": @@ -42,12 +45,36 @@ func (v *mockVespaApi) mockVespaHandler(w http.ResponseWriter, req *http.Request } } +func TestCustomTarget(t *testing.T) { + lt := LocalTarget() + assertServiceURL(t, "http://127.0.0.1:19071", lt, "deploy") + assertServiceURL(t, "http://127.0.0.1:8080", lt, "query") + assertServiceURL(t, "http://127.0.0.1:8080", lt, "document") + + ct := CustomTarget("http://192.0.2.42") + assertServiceURL(t, "http://192.0.2.42:19071", ct, "deploy") + assertServiceURL(t, "http://192.0.2.42:8080", ct, "query") + assertServiceURL(t, "http://192.0.2.42:8080", ct, "document") + + ct2 := CustomTarget("http://192.0.2.42:60000") + assertServiceURL(t, "http://192.0.2.42:60000", ct2, "deploy") + assertServiceURL(t, "http://192.0.2.42:60000", ct2, "query") + assertServiceURL(t, "http://192.0.2.42:60000", ct2, "document") +} + func TestCustomTargetWait(t *testing.T) { vc := mockVespaApi{} srv := httptest.NewServer(http.HandlerFunc(vc.mockVespaHandler)) defer srv.Close() target := CustomTarget(srv.URL) + err := target.DiscoverServices(0, 42) + assert.NotNil(t, err) + + vc.deploymentConverged = true + err = target.DiscoverServices(0, 42) + assert.Nil(t, err) + assertServiceWait(t, 200, target, "deploy") assertServiceWait(t, 500, target, "query") assertServiceWait(t, 500, target, "document") @@ -87,7 +114,7 @@ func TestCloudTargetWait(t *testing.T) { err = target.DiscoverServices(0, 42) assert.NotNil(t, err) - vc.endpointsReady = true + vc.deploymentConverged = true err = target.DiscoverServices(0, 42) assert.Nil(t, err) @@ -95,6 +122,12 @@ func TestCloudTargetWait(t *testing.T) { assertServiceWait(t, 500, target, "document") } +func assertServiceURL(t *testing.T, url string, target Target, service string) { + s, err := target.Service(service) + assert.Nil(t, err) + assert.Equal(t, url, s.BaseURL) +} + func assertServiceWait(t *testing.T, expectedStatus int, target Target, service string) { s, err := target.Service(service) assert.Nil(t, err) diff --git 
a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java index 1817f09ae46..e8a53b95566 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java +++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java @@ -3,8 +3,6 @@ package com.yahoo.config.model.application.provider; import com.yahoo.config.FileReference; import com.yahoo.config.application.api.FileRegistry; -import com.yahoo.net.HostName; -import net.jpountz.xxhash.XXHashFactory; import java.nio.ByteBuffer; import java.util.ArrayList; diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java index a6b56640c97..65e7635b92f 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java @@ -23,6 +23,7 @@ import java.security.cert.X509Certificate; import java.util.List; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ExecutorService; /** * Model context containing state provided to model factories. @@ -39,6 +40,7 @@ public interface ModelContext { DeployLogger deployLogger(); ConfigDefinitionRepo configDefinitionRepo(); FileRegistry getFileRegistry(); + ExecutorService getExecutor(); default Optional<? 
extends Reindexing> reindexing() { return Optional.empty(); } Properties properties(); default Optional<File> appDir() { return Optional.empty();} @@ -83,7 +85,7 @@ public interface ModelContext { @ModelFeatureFlag(owners = {"baldersheim"}) default boolean enforceRankProfileInheritance() { return false; } @ModelFeatureFlag(owners = {"baldersheim"}) default int largeRankExpressionLimit() { return 8192; } @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useExternalRankExpressions() { return true; } - @ModelFeatureFlag(owners = {"baldersheim"}) default boolean distributeExternalRankExpressions() { return false; } + @ModelFeatureFlag(owners = {"baldersheim"}) default boolean distributeExternalRankExpressions() { return true; } @ModelFeatureFlag(owners = {"baldersheim"}) default int maxConcurrentMergesPerNode() { throw new UnsupportedOperationException("TODO specify default value"); } @ModelFeatureFlag(owners = {"baldersheim"}) default int maxMergeQueueSize() { throw new UnsupportedOperationException("TODO specify default value"); } @ModelFeatureFlag(owners = {"baldersheim"}) default boolean dryRunOnnxOnSetup() { return true; } diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java index 249ca71117a..13769be9ec1 100644 --- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java +++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java @@ -145,7 +145,7 @@ public class DeployState implements ConfigDefinitionStore { this.zone = zone; this.queryProfiles = queryProfiles; // TODO: Remove this by seeing how pagetemplates are propagated this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated - this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger); + this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger, 
executor); this.validationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml) .orElse(ValidationOverrides.empty); @@ -211,9 +211,10 @@ public class DeployState implements ConfigDefinitionStore { private static ImportedMlModels importMlModels(ApplicationPackage applicationPackage, Collection<MlModelImporter> modelImporters, - DeployLogger deployLogger) { + DeployLogger deployLogger, + ExecutorService executor) { File importFrom = applicationPackage.getFileReference(ApplicationPackage.MODELS_DIR); - ImportedMlModels importedModels = new ImportedMlModels(importFrom, modelImporters); + ImportedMlModels importedModels = new ImportedMlModels(importFrom, executor, modelImporters); for (var entry : importedModels.getSkippedModels().entrySet()) { deployLogger.logApplicationPackage(Level.WARNING, "Skipping import of model " + entry.getKey() + " as an exception " + "occurred during import. Error: " + entry.getValue()); diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java index b42686406b5..9690f00a209 100644 --- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java +++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java @@ -51,6 +51,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea private double feedConcurrency = 0.5; private boolean enableFeedBlockInDistributor = true; private boolean useExternalRankExpression = true; + private boolean enforceRankProfileInheritance = true; private int maxActivationInhibitedOutOfSyncGroups = 0; private List<TenantSecretStore> tenantSecretStores = Collections.emptyList(); private String jvmOmitStackTraceInFastThrowOption; @@ -107,8 +108,12 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea @Override public double resourceLimitMemory() { return resourceLimitMemory; } 
@Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; } @Override public int metricsproxyNumThreads() { return 1; } - @Override public boolean enforceRankProfileInheritance() { return true; } + @Override public boolean enforceRankProfileInheritance() { return enforceRankProfileInheritance; } + public TestProperties enforceRankProfileInheritance(boolean value) { + enforceRankProfileInheritance = value; + return this; + } public TestProperties useExternalRankExpression(boolean value) { useExternalRankExpression = value; return this; diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java index 97770b6ebb9..2a85b0b85eb 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java @@ -3,6 +3,7 @@ package com.yahoo.searchdefinition; import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModels; import com.yahoo.config.application.api.ApplicationPackage; +import com.yahoo.config.application.api.DeployLogger; import com.yahoo.search.query.profile.QueryProfileRegistry; import com.yahoo.search.query.profile.types.FieldDescription; import com.yahoo.search.query.profile.types.QueryProfileType; @@ -44,7 +45,6 @@ import java.util.OptionalDouble; import java.util.Set; import java.util.function.Supplier; import java.util.logging.Level; -import java.util.logging.Logger; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -55,7 +55,6 @@ import java.util.stream.Stream; */ public class RankProfile implements Cloneable { - private final static Logger log = Logger.getLogger(RankProfile.class.getName()); public final static String FIRST_PHASE = "firstphase"; public final static String SECOND_PHASE = "secondphase"; /** The search definition-unique name of this rank profile */ @@ -130,6 +129,8 @@ public class RankProfile implements 
Cloneable { /** Global onnx models not tied to a search definition */ private final OnnxModels onnxModels; + private final DeployLogger deployLogger; + /** * Creates a new rank profile for a particular search definition * @@ -144,6 +145,7 @@ public class RankProfile implements Cloneable { this.model = null; this.onnxModels = null; this.rankProfileRegistry = rankProfileRegistry; + this.deployLogger = search.getDeployLogger(); } /** @@ -152,12 +154,13 @@ public class RankProfile implements Cloneable { * @param name the name of the new profile * @param model the model owning this profile */ - public RankProfile(String name, VespaModel model, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) { + public RankProfile(String name, VespaModel model, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, OnnxModels onnxModels) { this.name = Objects.requireNonNull(name, "name cannot be null"); this.search = null; this.model = Objects.requireNonNull(model, "model cannot be null"); this.rankProfileRegistry = rankProfileRegistry; this.onnxModels = onnxModels; + this.deployLogger = deployLogger; } public String getName() { return name; } @@ -214,7 +217,8 @@ public class RankProfile implements Cloneable { if (search.getDeployProperties().featureFlags().enforceRankProfileInheritance()) { throw new IllegalArgumentException(msg); } else { - log.warning(msg); + deployLogger.logApplicationPackage(Level.WARNING, msg); + inherited = resolveIndependentOfInheritance(); } } else { List<String> children = new ArrayList<>(); @@ -224,6 +228,12 @@ public class RankProfile implements Cloneable { } return inherited; } + private RankProfile resolveIndependentOfInheritance() { + for (RankProfile rankProfile : rankProfileRegistry.all()) { + if (rankProfile.getName().equals(inheritedName)) return rankProfile; + } + return null; + } private String createFullyQualifiedName() { return (search != null) ? (search.getName() + "." 
+ getName()) @@ -634,6 +644,10 @@ public class RankProfile implements Cloneable { /** Adds a function and returns it */ public RankingExpressionFunction addFunction(ExpressionFunction function, boolean inline) { RankingExpressionFunction rankingExpressionFunction = new RankingExpressionFunction(function, inline); + if (functions.containsKey(function.getName())) { + deployLogger.log(Level.WARNING, "Function '" + function.getName() + "' replaces a previous function " + + "with the same name in rank profile '" + this.name + "'"); + } functions.put(function.getName(), rankingExpressionFunction); allFunctionsCached = null; return rankingExpressionFunction; diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java index f49a477c66b..9cd2c4d3bfb 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java @@ -95,7 +95,10 @@ public class SearchBuilder { /** For testing only */ public SearchBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { - this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), new TestProperties(), rankProfileRegistry, queryProfileRegistry); + this(rankProfileRegistry, queryProfileRegistry, new TestProperties()); + } + public SearchBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, ModelContext.Properties properties) { + this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), properties, rankProfileRegistry, queryProfileRegistry); } public SearchBuilder(ApplicationPackage app, diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/FunctionInliner.java b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/FunctionInliner.java index 
c15ef20a455..377a90b68f9 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/FunctionInliner.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/FunctionInliner.java @@ -24,6 +24,7 @@ public class FunctionInliner extends ExpressionTransformer<RankProfileTransformC } private ExpressionNode transformFeatureNode(ReferenceNode feature, RankProfileTransformContext context) { + if (feature.getArguments().size() > 0) return feature; // From RankProfile: only inline no-arg functions RankProfile.RankingExpressionFunction rankingExpressionFunction = context.inlineFunctions().get(feature.getName()); if (rankingExpressionFunction == null) return feature; return transform(rankingExpressionFunction.function().getBody().getRoot(), context); // inline recursively and return diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java index e6687ec8ac8..c6c2fea5900 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java @@ -2,7 +2,6 @@ package com.yahoo.vespa.model; import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModel; -import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModels; import com.yahoo.collections.Pair; import com.yahoo.component.Version; import com.yahoo.config.ConfigInstance; @@ -78,6 +77,8 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -184,8 +185,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri VespaModelBuilder builder = new VespaDomBuilder(); root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this); 
- createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(), - deployState.rankProfileRegistry(), deployState.getQueryProfiles()); + createGlobalRankProfiles(deployState); rankProfileList = new RankProfileList(null, // null search -> global rankingConstants, largeRankExpressions, @@ -291,18 +291,24 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri * Creates a rank profile not attached to any search definition, for each imported model in the application package, * and adds it to the given rank profile registry. */ - private void createGlobalRankProfiles(DeployLogger deployLogger, ImportedMlModels importedModels, - RankProfileRegistry rankProfileRegistry, - QueryProfiles queryProfiles) { - if ( ! importedModels.all().isEmpty()) { // models/ directory is available - for (ImportedMlModel model : importedModels.all()) { + private void createGlobalRankProfiles(DeployState deployState) { + var importedModels = deployState.getImportedModels().all(); + DeployLogger deployLogger = deployState.getDeployLogger(); + RankProfileRegistry rankProfileRegistry = deployState.rankProfileRegistry(); + QueryProfiles queryProfiles = deployState.getQueryProfiles(); + List <Future<ConvertedModel>> futureModels = new ArrayList<>(); + if ( ! importedModels.isEmpty()) { // models/ directory is available + for (ImportedMlModel model : importedModels) { // Due to automatic naming not guaranteeing unique names, there must be a 1-1 between OnnxModels and global RankProfiles. 
OnnxModels onnxModels = onnxModelInfoFromSource(model); - RankProfile profile = new RankProfile(model.name(), this, rankProfileRegistry, onnxModels); + RankProfile profile = new RankProfile(model.name(), this, deployLogger, rankProfileRegistry, onnxModels); rankProfileRegistry.add(profile); - ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()), - model.name(), profile, queryProfiles.getRegistry(), model); - convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); + futureModels.add(deployState.getExecutor().submit(() -> { + ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()), + model.name(), profile, queryProfiles.getRegistry(), model); + convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); + return convertedModel; + })); } } else { // generated and stored model information may be available instead @@ -312,10 +318,20 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri if (modelName.contains(".")) continue; // Name space: Not a global profile // Due to automatic naming not guaranteeing unique names, there must be a 1-1 between OnnxModels and global RankProfiles. 
OnnxModels onnxModels = onnxModelInfoFromStore(modelName); - RankProfile profile = new RankProfile(modelName, this, rankProfileRegistry, onnxModels); + RankProfile profile = new RankProfile(modelName, this, deployLogger, rankProfileRegistry, onnxModels); rankProfileRegistry.add(profile); - ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile); - convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); + futureModels.add(deployState.getExecutor().submit(() -> { + ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile); + convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); + return convertedModel; + })); + } + } + for (var futureConvertedModel : futureModels) { + try { + futureConvertedModel.get(); + } catch (ExecutionException |InterruptedException e) { + throw new RuntimeException(e); } } new Processing().processRankProfiles(deployLogger, rankProfileRegistry, queryProfiles, true, false); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java index 45623bbe611..0ee2c363252 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java @@ -168,6 +168,7 @@ public class VespaModelFactory implements ModelFactory { .deployLogger(modelContext.deployLogger()) .configDefinitionRepo(modelContext.configDefinitionRepo()) .fileRegistry(modelContext.getFileRegistry()) + .executor(modelContext.getExecutor()) .permanentApplicationPackage(modelContext.permanentApplicationPackage()) .properties(modelContext.properties()) .vespaVersion(version()) diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java 
b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java index b0ed04f3013..f7effbc3bc7 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.model.admin.metricsproxy; import ai.vespa.metricsproxy.core.ConsumersConfig.Consumer; +import com.yahoo.config.provision.SystemName; import com.yahoo.vespa.model.admin.monitoring.Metric; import com.yahoo.vespa.model.admin.monitoring.MetricSet; import com.yahoo.vespa.model.admin.monitoring.MetricsConsumer; @@ -24,7 +25,8 @@ class ConsumersConfigGenerator { * @return a list of consumer builders (a mapping from consumer to its metrics) */ static List<Consumer.Builder> generateConsumers(MetricsConsumer defaultConsumer, - Map<String, MetricsConsumer> userConsumers) { + Map<String, MetricsConsumer> userConsumers, + SystemName systemName) { // Normally, the user given consumers should not contain VESPA_CONSUMER_ID, // but it's allowed for some internally used applications. 
var allConsumers = new LinkedHashMap<>(userConsumers); @@ -32,6 +34,9 @@ class ConsumersConfigGenerator { combineConsumers(defaultConsumer, allConsumers.get(MetricsConsumer.vespa.id()))); allConsumers.put(MetricsConsumer.autoscaling.id(), MetricsConsumer.autoscaling); + if (systemName.isPublic()) + allConsumers.put(MetricsConsumer.vespaCloud.id(), MetricsConsumer.vespaCloud); + return allConsumers.values().stream() .map(ConsumersConfigGenerator::toConsumerBuilder) .collect(Collectors.toList()); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java index e504bee0a30..0b2d0936235 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java @@ -158,7 +158,7 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC @Override public void getConfig(ConsumersConfig.Builder builder) { var amendedVespaConsumer = addMetrics(MetricsConsumer.vespa, getAdditionalDefaultMetrics().getMetrics()); - builder.consumer.addAll(generateConsumers(amendedVespaConsumer, getUserMetricsConsumers())); + builder.consumer.addAll(generateConsumers(amendedVespaConsumer, getUserMetricsConsumers(), getZone().system())); builder.consumer.add(toConsumerBuilder(MetricsConsumer.defaultConsumer)); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java index 6344e462e0f..b513c5d3021 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java @@ -35,6 +35,8 @@ public class MetricsConsumer { // 
Referenced from com.yahoo.vespa.hosted.provision.autoscale.NodeMetricsFetcher public static final MetricsConsumer autoscaling = consumer("autoscaling", autoscalingMetricSet); + public static final MetricsConsumer vespaCloud = + consumer("vespa-cloud", vespaMetricSet, systemMetricSet, networkMetricSet); private final String id; private final MetricSet metricSet; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java index 3efc50a7445..ac56143732d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java @@ -92,6 +92,9 @@ public class MetricsBuilder { if (consumerId.equalsIgnoreCase(MetricsConsumer.autoscaling.id())) throw new IllegalArgumentException("'" + MetricsConsumer.autoscaling.id() + " is not allowed as metrics consumer id (case is ignored.)"); + if (consumerId.equalsIgnoreCase(MetricsConsumer.vespaCloud.id())) + throw new IllegalArgumentException("'" + MetricsConsumer.vespaCloud.id() + " is not allowed as metrics consumer id (case is ignored.)"); + if (metrics.hasConsumerIgnoreCase(consumerId)) throw new IllegalArgumentException("'" + consumerId + "' is used as id for two metrics consumers (case is ignored.)"); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java index 792fa3f1884..086730d5012 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java @@ -35,6 +35,7 @@ public final class ApplicationContainer extends Container implements addComponent(new 
SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerHolder")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerProvider")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.SessionCache")); + addComponent(new SimpleComponent("com.yahoo.container.jdisc.SystemInfoProvider")); } @Override diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java index 1203497f59f..c707a62788c 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java @@ -105,7 +105,6 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider"); addSimpleComponent("com.yahoo.container.jdisc.AthenzIdentityProviderProvider"); - addSimpleComponent("com.yahoo.container.jdisc.SystemInfoProvider"); addSimpleComponent(com.yahoo.container.core.documentapi.DocumentAccessProvider.class.getName()); addMetricsHandlers(); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTester.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTester.java index b98cabb6f33..6d723961acb 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTester.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTester.java @@ -9,7 +9,10 @@ import ai.vespa.rankingexpression.importer.tensorflow.TensorFlowImporter; import ai.vespa.rankingexpression.importer.vespa.VespaImporter; import ai.vespa.rankingexpression.importer.xgboost.XGBoostImporter; import 
com.google.common.collect.ImmutableList; +import com.yahoo.config.FileReference; import com.yahoo.config.application.api.ApplicationPackage; +import com.yahoo.config.application.api.FileRegistry; +import com.yahoo.config.model.application.provider.MockFileRegistry; import com.yahoo.config.model.deploy.DeployState; import com.yahoo.config.model.test.MockApplicationPackage; import com.yahoo.filedistribution.fileacquirer.FileAcquirer; @@ -21,10 +24,13 @@ import com.yahoo.vespa.config.search.core.OnnxModelsConfig; import com.yahoo.vespa.config.search.core.RankingConstantsConfig; import com.yahoo.vespa.config.search.core.RankingExpressionsConfig; import com.yahoo.vespa.model.VespaModel; +import net.jpountz.lz4.LZ4FrameOutputStream; import org.xml.sax.SAXException; import java.io.File; +import java.io.FileOutputStream; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; @@ -66,13 +72,14 @@ public class ModelsEvaluatorTester { File temporaryApplicationDir = null; try { temporaryApplicationDir = createTemporaryApplicationDir(modelsPath); - RankProfileList rankProfileList = createRankProfileList(temporaryApplicationDir); + MockFileRegistry fileRegistry = new MockFileBlobRegistry(temporaryApplicationDir); + RankProfileList rankProfileList = createRankProfileList(temporaryApplicationDir, fileRegistry); RankProfilesConfig rankProfilesConfig = getRankProfilesConfig(rankProfileList); RankingConstantsConfig rankingConstantsConfig = getRankingConstantConfig(rankProfileList); RankingExpressionsConfig rankingExpressionsConfig = getRankingExpressionsConfig(rankProfileList); OnnxModelsConfig onnxModelsConfig = getOnnxModelsConfig(rankProfileList); - FileAcquirer files = createFileAcquirer(rankingConstantsConfig, onnxModelsConfig, temporaryApplicationDir); + FileAcquirer files = createFileAcquirer(fileRegistry, temporaryApplicationDir); return new ModelsEvaluator(rankProfilesConfig, 
rankingConstantsConfig, rankingExpressionsConfig, onnxModelsConfig, files); @@ -93,12 +100,16 @@ public class ModelsEvaluatorTester { return temporaryApplicationDir; } - private static RankProfileList createRankProfileList(File appDir) throws IOException, SAXException { + private static RankProfileList createRankProfileList(File appDir, FileRegistry registry) throws IOException, SAXException { ApplicationPackage app = new MockApplicationPackage.Builder() .withEmptyHosts() .withServices(modelEvaluationServices) .withRoot(appDir).build(); - DeployState deployState = new DeployState.Builder().applicationPackage(app).modelImporters(importers).build(); + DeployState deployState = new DeployState.Builder() + .applicationPackage(app) + .fileRegistry(registry) + .modelImporters(importers).build(); + VespaModel vespaModel = new VespaModel(deployState); return vespaModel.rankProfileList(); } @@ -127,13 +138,10 @@ public class ModelsEvaluatorTester { return builder.build(); } - private static FileAcquirer createFileAcquirer(RankingConstantsConfig constantsConfig, OnnxModelsConfig onnxModelsConfig, File appDir) { + private static FileAcquirer createFileAcquirer(MockFileRegistry fileRegistry, File appDir) { Map<String, File> fileMap = new HashMap<>(); - for (RankingConstantsConfig.Constant constant : constantsConfig.constant()) { - fileMap.put(constant.fileref().value(), relativePath(appDir, constant.fileref().value())); - } - for (OnnxModelsConfig.Model model : onnxModelsConfig.model()) { - fileMap.put(model.fileref().value(), relativePath(appDir, model.fileref().value())); + for (FileRegistry.Entry entry : fileRegistry.export()) { + fileMap.put(entry.reference.value(), relativePath(appDir, entry.reference.value())); } return MockFileAcquirer.returnFiles(fileMap); } @@ -142,4 +150,34 @@ public class ModelsEvaluatorTester { return new File(root.getAbsolutePath() + File.separator + subpath); } + private static class MockFileBlobRegistry extends MockFileRegistry { + + private 
final File appDir; + + MockFileBlobRegistry(File appdir) { + this.appDir = appdir; + } + + @Override + public FileReference addBlob(String name, ByteBuffer blob) { + writeBlob(blob, name); + return addFile(name); + } + + private void writeBlob(ByteBuffer blob, String relativePath) { + try (FileOutputStream fos = new FileOutputStream(new File(appDir, relativePath))) { + if (relativePath.endsWith(".lz4")) { + LZ4FrameOutputStream lz4 = new LZ4FrameOutputStream(fos); + lz4.write(blob.array(), blob.arrayOffset(), blob.remaining()); + lz4.close(); + } else { + fos.write(blob.array(), blob.arrayOffset(), blob.remaining()); + } + } catch (IOException e) { + throw new IllegalArgumentException("Failed writing temp file", e); + } + } + + } + } diff --git a/config-model/src/test/cfg/application/stateless_eval/lightgbm_regression.json b/config-model/src/test/cfg/application/stateless_eval/lightgbm_regression.json new file mode 100644 index 00000000000..cf0488ecd8b --- /dev/null +++ b/config-model/src/test/cfg/application/stateless_eval/lightgbm_regression.json @@ -0,0 +1,275 @@ +{ + "name": "tree", + "version": "v3", + "num_class": 1, + "num_tree_per_iteration": 1, + "label_index": 0, + "max_feature_idx": 3, + "average_output": false, + "objective": "regression", + "feature_names": [ + "numerical_1", + "numerical_2", + "categorical_1", + "categorical_2" + ], + "monotone_constraints": [], + "tree_info": [ + { + "tree_index": 0, + "num_leaves": 3, + "num_cat": 1, + "shrinkage": 1, + "tree_structure": { + "split_index": 0, + "split_feature": 1, + "split_gain": 68.5353012084961, + "threshold": 0.46643291586559305, + "decision_type": "<=", + "default_left": true, + "missing_type": "NaN", + "internal_value": 0, + "internal_weight": 0, + "internal_count": 1000, + "left_child": { + "leaf_index": 0, + "leaf_value": 2.1594397038037663, + "leaf_weight": 469, + "leaf_count": 469 + }, + "right_child": { + "split_index": 1, + "split_feature": 3, + "split_gain": 41.27640151977539, + 
"threshold": "2||3||4", + "decision_type": "==", + "default_left": false, + "missing_type": "NaN", + "internal_value": 0.246035, + "internal_weight": 531, + "internal_count": 531, + "left_child": { + "leaf_index": 1, + "leaf_value": 2.235297305276056, + "leaf_weight": 302, + "leaf_count": 302 + }, + "right_child": { + "leaf_index": 2, + "leaf_value": 2.1792953471546546, + "leaf_weight": 229, + "leaf_count": 229 + } + } + } + }, + { + "tree_index": 1, + "num_leaves": 3, + "num_cat": 1, + "shrinkage": 0.1, + "tree_structure": { + "split_index": 0, + "split_feature": 2, + "split_gain": 64.22250366210938, + "threshold": "3||4", + "decision_type": "==", + "default_left": false, + "missing_type": "NaN", + "internal_value": 0, + "internal_weight": 0, + "internal_count": 1000, + "left_child": { + "leaf_index": 0, + "leaf_value": 0.03070842919354316, + "leaf_weight": 399, + "leaf_count": 399 + }, + "right_child": { + "split_index": 1, + "split_feature": 0, + "split_gain": 36.74250030517578, + "threshold": 0.5102250691730842, + "decision_type": "<=", + "default_left": true, + "missing_type": "NaN", + "internal_value": -0.204906, + "internal_weight": 601, + "internal_count": 601, + "left_child": { + "leaf_index": 1, + "leaf_value": -0.04439151147520909, + "leaf_weight": 315, + "leaf_count": 315 + }, + "right_child": { + "leaf_index": 2, + "leaf_value": 0.005117411709368601, + "leaf_weight": 286, + "leaf_count": 286 + } + } + } + }, + { + "tree_index": 2, + "num_leaves": 3, + "num_cat": 0, + "shrinkage": 0.1, + "tree_structure": { + "split_index": 0, + "split_feature": 1, + "split_gain": 57.1327018737793, + "threshold": 0.668665477622446, + "decision_type": "<=", + "default_left": true, + "missing_type": "NaN", + "internal_value": 0, + "internal_weight": 0, + "internal_count": 1000, + "left_child": { + "split_index": 1, + "split_feature": 1, + "split_gain": 40.859100341796875, + "threshold": 0.008118820676863816, + "decision_type": "<=", + "default_left": true, + 
"missing_type": "NaN", + "internal_value": -0.162926, + "internal_weight": 681, + "internal_count": 681, + "left_child": { + "leaf_index": 0, + "leaf_value": -0.15361238490967524, + "leaf_weight": 21, + "leaf_count": 21 + }, + "right_child": { + "leaf_index": 2, + "leaf_value": -0.01192330846157292, + "leaf_weight": 660, + "leaf_count": 660 + } + }, + "right_child": { + "leaf_index": 1, + "leaf_value": 0.03499044894987518, + "leaf_weight": 319, + "leaf_count": 319 + } + } + }, + { + "tree_index": 3, + "num_leaves": 3, + "num_cat": 1, + "shrinkage": 0.1, + "tree_structure": { + "split_index": 0, + "split_feature": 0, + "split_gain": 54.77090072631836, + "threshold": 0.5201391072644542, + "decision_type": "<=", + "default_left": true, + "missing_type": "NaN", + "internal_value": 0, + "internal_weight": 0, + "internal_count": 1000, + "left_child": { + "leaf_index": 0, + "leaf_value": -0.02141000620783247, + "leaf_weight": 543, + "leaf_count": 543 + }, + "right_child": { + "split_index": 1, + "split_feature": 2, + "split_gain": 27.200700759887695, + "threshold": "0||1", + "decision_type": "==", + "default_left": false, + "missing_type": "NaN", + "internal_value": 0.255704, + "internal_weight": 457, + "internal_count": 457, + "left_child": { + "leaf_index": 1, + "leaf_value": -0.004121485787596721, + "leaf_weight": 191, + "leaf_count": 191 + }, + "right_child": { + "leaf_index": 2, + "leaf_value": 0.04534090904886873, + "leaf_weight": 266, + "leaf_count": 266 + } + } + } + }, + { + "tree_index": 4, + "num_leaves": 3, + "num_cat": 1, + "shrinkage": 0.1, + "tree_structure": { + "split_index": 0, + "split_feature": 3, + "split_gain": 51.84349822998047, + "threshold": "2||3||4", + "decision_type": "==", + "default_left": false, + "missing_type": "NaN", + "internal_value": 0, + "internal_weight": 0, + "internal_count": 1000, + "left_child": { + "split_index": 1, + "split_feature": 1, + "split_gain": 39.352699279785156, + "threshold": 0.27283279016959255, + "decision_type": 
"<=", + "default_left": true, + "missing_type": "NaN", + "internal_value": 0.188414, + "internal_weight": 593, + "internal_count": 593, + "left_child": { + "leaf_index": 0, + "leaf_value": -0.01924803254356527, + "leaf_weight": 184, + "leaf_count": 184 + }, + "right_child": { + "leaf_index": 2, + "leaf_value": 0.03643772842347651, + "leaf_weight": 409, + "leaf_count": 409 + } + }, + "right_child": { + "leaf_index": 1, + "leaf_value": -0.02701711918923075, + "leaf_weight": 407, + "leaf_count": 407 + } + } + } + ], + "pandas_categorical": [ + [ + "a", + "b", + "c", + "d", + "e" + ], + [ + "i", + "j", + "k", + "l", + "m" + ] + ] +}
\ No newline at end of file diff --git a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java index 59af3193b79..8c4c6aa7fc0 100644 --- a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java +++ b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java @@ -2,6 +2,7 @@ package com.yahoo.config.model; import com.yahoo.component.Version; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.FileRegistry; @@ -18,6 +19,7 @@ import com.yahoo.config.model.deploy.TestProperties; import com.yahoo.config.model.test.MockApplicationPackage; import java.util.Optional; +import java.util.concurrent.ExecutorService; /** * @author hmusum @@ -83,4 +85,8 @@ public class MockModelContext implements ModelContext { return new TestProperties(); } + @Override + public ExecutorService getExecutor() { + return new InThreadExecutorService(); + } } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java index 8f3fbfc9de9..69789d09dc2 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java @@ -118,8 +118,13 @@ public class RankProfileTestCase extends SchemaTestCase { @Test public void requireThatSidewaysInheritanceIsImpossible() throws ParseException { + verifySidewaysInheritance(false); + verifySidewaysInheritance(true); + } + private void verifySidewaysInheritance(boolean enforce) throws ParseException { RankProfileRegistry registry = new RankProfileRegistry(); - SearchBuilder builder = new SearchBuilder(registry, setupQueryProfileTypes()); + SearchBuilder builder = new 
SearchBuilder(registry, setupQueryProfileTypes(), + new TestProperties().enforceRankProfileInheritance(enforce)); builder.importString(joinLines( "schema child1 {", " document child1 {", @@ -163,7 +168,15 @@ public class RankProfileTestCase extends SchemaTestCase { "}")); try { builder.build(true); + if (enforce) { + fail("Sideways inheritance should have been enforced"); + } else { + assertNotNull(builder.getSearch("child2")); + assertNotNull(builder.getSearch("child1")); + assertTrue(registry.get("child1", "child").inherits("parent")); + } } catch (IllegalArgumentException e) { + if (!enforce) fail("Sideways inheritance should have been allowed"); assertEquals("rank-profile 'child' inherits 'parent', but it does not exist anywhere in the inheritance of search 'child1'.", e.getMessage()); } } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionInliningTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionInliningTestCase.java index d7143281977..d87278a9ca1 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionInliningTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionInliningTestCase.java @@ -2,8 +2,10 @@ package com.yahoo.searchdefinition; import com.yahoo.collections.Pair; +import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.application.provider.MockFileRegistry; import com.yahoo.config.model.deploy.TestProperties; +import com.yahoo.config.model.test.MockApplicationPackage; import com.yahoo.search.query.profile.QueryProfileRegistry; import com.yahoo.searchdefinition.derived.AttributeFields; import com.yahoo.searchdefinition.derived.RawRankProfile; @@ -11,7 +13,9 @@ import com.yahoo.searchdefinition.parser.ParseException; import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModels; import org.junit.Test; +import java.util.ArrayList; import java.util.Optional; +import 
java.util.logging.Level; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -26,7 +30,7 @@ public class RankingExpressionInliningTestCase extends SchemaTestCase { RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); SearchBuilder builder = new SearchBuilder(rankProfileRegistry); builder.importString( - "search test {\n" + + "search test {\n" + " document test { \n" + " field a type double { \n" + " indexing: attribute \n" + @@ -186,6 +190,39 @@ public class RankingExpressionInliningTestCase extends SchemaTestCase { assertEquals("attribute(b) + 1", getRankingExpression("D", test, s)); } + @Test + public void testFunctionInliningWithReplacement() throws ParseException { + RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); + MockDeployLogger deployLogger = new MockDeployLogger(); + SearchBuilder builder = new SearchBuilder(MockApplicationPackage.createEmpty(), + new MockFileRegistry(), + deployLogger, + new TestProperties(), + rankProfileRegistry, + new QueryProfileRegistry()); + builder.importString( + "search test {\n" + + " document test { }\n" + + " rank-profile test {\n" + + " first-phase {\n" + + " expression: foo\n" + + " }\n" + + " function foo(x) {\n" + + " expression: x + x\n" + + " }\n" + + " function inline foo() {\n" + // replaces previous "foo" during parsing + " expression: foo(2)\n" + + " }\n" + + " }\n" + + "}\n"); + builder.build(); + Search s = builder.getSearch(); + RankProfile test = rankProfileRegistry.get(s, "test").compile(new QueryProfileRegistry(), new ImportedMlModels()); + assertEquals("foo(2)", test.getFirstPhaseRanking().getRoot().toString()); + assertTrue("Does not contain expected warning", deployLogger.contains("Function 'foo' replaces " + + "a previous function with the same name in rank profile 'test'")); + } + /** * Expression evaluation has no stack so function arguments are bound at config time creating a separate version of * each function for each binding, 
using hashes to name the bound variants of the function. @@ -221,4 +258,17 @@ public class RankingExpressionInliningTestCase extends SchemaTestCase { return censorBindingHash(rankExpression.get()); } + private static class MockDeployLogger implements DeployLogger { + private final ArrayList<String> msgs = new ArrayList<>(); + + @Override + public void log(Level level, String message) { + msgs.add(message); + } + + public boolean contains(String expected) { + return msgs.stream().anyMatch(msg -> msg.equals(expected)); + } + } + } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/ExportingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/ExportingTestCase.java index 8ef04752800..12263521dcb 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/ExportingTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/ExportingTestCase.java @@ -9,7 +9,6 @@ import org.junit.Test; import java.io.IOException; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; /** * Tests exporting @@ -106,7 +105,7 @@ public class ExportingTestCase extends AbstractExportingTestCase { @Test public void testRankExpression() throws IOException, ParseException { assertCorrectDeriving("rankexpression", null, - new TestProperties().useExternalRankExpression(true).largeRankExpressionLimit(1024), new TestableDeployLogger()); + new TestProperties().largeRankExpressionLimit(1024), new TestableDeployLogger()); } @Test diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java index 010b33597f3..9c363ea0628 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java @@ -24,6 +24,8 @@ import 
ai.vespa.rankingexpression.importer.xgboost.XGBoostImporter; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import static org.junit.Assert.assertEquals; @@ -43,6 +45,7 @@ class RankProfileSearchFixture { private final QueryProfileRegistry queryProfileRegistry; private final Search search; private final Map<String, RankProfile> compiledRankProfiles = new HashMap<>(); + private final ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); public RankProfileRegistry getRankProfileRegistry() { return rankProfileRegistry; @@ -105,7 +108,7 @@ class RankProfileSearchFixture { public RankProfile compileRankProfile(String rankProfile, Path applicationDir) { RankProfile compiled = rankProfileRegistry.get(search, rankProfile) .compile(queryProfileRegistry, - new ImportedMlModels(applicationDir.toFile(), importers)); + new ImportedMlModels(applicationDir.toFile(), executor, importers)); compiledRankProfiles.put(rankProfile, compiled); return compiled; } diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java index 00ac5ac5405..b81fe7a02cc 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java @@ -36,7 +36,7 @@ public class RankingExpressionsTestCase extends SchemaTestCase { @Test public void testFunctions() throws IOException, ParseException { - ModelContext.Properties deployProperties = new TestProperties().useExternalRankExpression(true); + ModelContext.Properties deployProperties = new TestProperties(); RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); Search search = 
createSearch("src/test/examples/rankingexpressionfunction", deployProperties, rankProfileRegistry); RankProfile functionsRankProfile = rankProfileRegistry.get(search, "macros"); @@ -115,7 +115,7 @@ public class RankingExpressionsTestCase extends SchemaTestCase { @Test public void testLargeInheritedFunctions() throws IOException, ParseException { - ModelContext.Properties properties = new TestProperties().useExternalRankExpression(true).largeRankExpressionLimit(50); + ModelContext.Properties properties = new TestProperties().largeRankExpressionLimit(50); RankProfileRegistry rankProfileRegistry = new RankProfileRegistry(); LargeRankExpressions largeExpressions = new LargeRankExpressions(new MockFileRegistry()); QueryProfileRegistry queryProfiles = new QueryProfileRegistry(); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTest.java index 771cba673bc..e6d3b5dc140 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ml/ModelsEvaluatorTest.java @@ -18,7 +18,7 @@ public class ModelsEvaluatorTest { @Test public void testModelsEvaluatorTester() { ModelsEvaluator modelsEvaluator = ModelsEvaluatorTester.create("src/test/cfg/application/stateless_eval"); - assertEquals(2, modelsEvaluator.models().size()); + assertEquals(3, modelsEvaluator.models().size()); // ONNX model evaluation FunctionEvaluator mul = modelsEvaluator.evaluatorOf("mul"); @@ -27,6 +27,12 @@ public class ModelsEvaluatorTest { Tensor output = mul.bind("input1", input1).bind("input2", input2).evaluate(); assertEquals(6.0, output.sum().asDouble(), 1e-9); + // LightGBM model evaluation + FunctionEvaluator lgbm = modelsEvaluator.evaluatorOf("lightgbm_regression"); + lgbm.bind("numerical_1", 0.1).bind("numerical_2", 0.2).bind("categorical_1", "a").bind("categorical_2", "i"); + 
output = lgbm.evaluate(); + assertEquals(2.0547, output.sum().asDouble(), 1e-4); + // Vespa model evaluation FunctionEvaluator foo1 = modelsEvaluator.evaluatorOf("example", "foo1"); input1 = Tensor.from("tensor(name{},x[3]):{{name:n,x:0}:1,{name:n,x:1}:2,{name:n,x:2}:3 }"); diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java index 3700dee92e3..2d9f4bd7fd0 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ConfigProxyRpcServer.java @@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue; import com.yahoo.jrt.Supervisor; import com.yahoo.jrt.Target; import com.yahoo.jrt.TargetWatcher; -import java.util.logging.Level; import com.yahoo.vespa.config.JRTMethods; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; @@ -23,6 +22,7 @@ import java.util.Iterator; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; import java.util.logging.Logger; /** @@ -353,7 +353,7 @@ public class ConfigProxyRpcServer implements Runnable, TargetWatcher, RpcServer request.addOkResponse(config.getPayload(), config.getGeneration(), config.applyOnRestart(), - config.getConfigMd5()); + config.getPayloadChecksums()); log.log(Level.FINE, () -> "Return response: " + request.getShortDescription() + ",configMd5=" + config.getConfigMd5() + ",generation=" + config.getGeneration()); log.log(Level.FINEST, () -> "Config payload in response for " + request.getShortDescription() + ":" + config.getPayload()); diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigTester.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigTester.java index 1b009b80fc1..45f52479cbd 100644 --- 
a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigTester.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ConfigTester.java @@ -5,6 +5,8 @@ import com.yahoo.jrt.Request; import com.yahoo.slime.Slime; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.DefContent; @@ -19,6 +21,9 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + /** * @author bratseth */ @@ -42,7 +47,8 @@ public class ConfigTester { long generation = 1; String defMd5 = ConfigUtils.getDefMd5(defContent); - String configMd5 = ConfigUtils.getMd5(fooConfigPayload); + PayloadChecksums configMd5 = PayloadChecksums.from(new PayloadChecksum(ConfigUtils.getMd5(fooConfigPayload), MD5), + PayloadChecksum.empty(XXHASH64)); fooConfig = new RawConfig(configKey, defMd5, fooPayload, configMd5, generation, false, defContent, Optional.empty()); @@ -57,24 +63,24 @@ public class ConfigTester { JRTServerConfigRequest createRequest(RawConfig config, long timeout) { return createRequest(config.getName(), config.getConfigId(), config.getNamespace(), - config.getConfigMd5(), config.getGeneration(), timeout); + config.getPayloadChecksums(), config.getGeneration(), timeout); } JRTServerConfigRequest createRequest(String configName, String configId, String namespace, long timeout) { - return createRequest(configName, configId, namespace, null, 0, timeout); + return createRequest(configName, configId, namespace, PayloadChecksums.empty(), 0, timeout); } private JRTServerConfigRequest createRequest(String configName, String configId, String namespace, - String md5, + 
PayloadChecksums payloadChecksums, long generation, long timeout) { Request request = JRTClientConfigRequestV3. createWithParams(new ConfigKey<>(configName, configId, namespace, null), DefContent.fromList(defContent), "fromHost", - md5, + payloadChecksums, generation, timeout, Trace.createDummy(), diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheTest.java index 485a091d9ae..b47c0bcc5ce 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/MemoryCacheTest.java @@ -1,10 +1,11 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.proxy; import com.yahoo.slime.Slime; import com.yahoo.vespa.config.ConfigCacheKey; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.protocol.Payload; import org.junit.Before; @@ -18,24 +19,24 @@ import static org.junit.Assert.*; /** * @author hmusum - * @since 5.1.9 */ public class MemoryCacheTest { - private String defName = "foo"; - private String configId = "id"; - private String namespace = "bar"; + + private final String defName = "foo"; + private final String configId = "id"; + private final String namespace = "bar"; private static final String defMd5 = "a"; - private long generation = 1L; - private String defName2 = "baz-quux"; - private String namespace2 = "search.config"; + private final long generation = 1L; + private final String defName2 = "baz-quux"; + private final String namespace2 = "search.config"; // Test with a config id with / in it - private String configId2 = 
"clients/gateways/gateway/component/com.yahoo.feedhandler.VespaFeedHandlerRemoveLocation"; + private final String configId2 = "clients/gateways/gateway/component/com.yahoo.feedhandler.VespaFeedHandlerRemoveLocation"; private static final String defMd52 = "a2"; private static final String differentDefMd5 = "09ef"; - private static final String configMd5 = "b"; - private ConfigKey<?> configKey = new ConfigKey<>(defName, configId, namespace); - private ConfigKey<?> configKey2 = new ConfigKey<>(defName2, configId2, namespace2); + private static final PayloadChecksums configMd5 = PayloadChecksums.from("b", ""); + private final ConfigKey<?> configKey = new ConfigKey<>(defName, configId, namespace); + private final ConfigKey<?> configKey2 = new ConfigKey<>(defName2, configId2, namespace2); private ConfigCacheKey cacheKey; private ConfigCacheKey cacheKeyDifferentMd5; private ConfigCacheKey cacheKey2; diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java index 87c1fa151f8..32e68c662e8 100644 --- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java +++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/ProxyServerTest.java @@ -35,7 +35,7 @@ public class ProxyServerTest { // errorConfig based on fooConfig private static final ConfigKey<?> errorConfigKey = new ConfigKey<>("error", fooConfig.getConfigId(), fooConfig.getNamespace()); static final RawConfig errorConfig = new RawConfig(errorConfigKey, fooConfig.getDefMd5(), fooConfig.getPayload(), - fooConfig.getConfigMd5(), fooConfig.getGeneration(), false, + fooConfig.getPayloadChecksums(), fooConfig.getGeneration(), false, ErrorCode.UNKNOWN_DEFINITION, fooConfig.getDefContent(), Optional.empty()); @Rule @@ -179,7 +179,7 @@ public class ProxyServerTest { // Simulate an empty response RawConfig emptyConfig = new RawConfig(fooConfig.getKey(), fooConfig.getDefMd5(), Payload.from("{}"), 
- fooConfig.getConfigMd5(), 0, false, + fooConfig.getPayloadChecksums(), 0, false, 0, fooConfig.getDefContent(), Optional.empty()); source.put(fooConfig.getKey(), emptyConfig); @@ -238,7 +238,7 @@ public class ProxyServerTest { static RawConfig createConfigWithNextConfigGeneration(RawConfig config, int errorCode, Payload payload, long configGeneration) { return new RawConfig(config.getKey(), config.getDefMd5(), - payload, config.getConfigMd5(), + payload, config.getPayloadChecksums(), configGeneration, false, errorCode, config.getDefContent(), Optional.empty()); } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java index 53a9f3f9f94..6a81c2279d1 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java @@ -11,6 +11,7 @@ import com.yahoo.config.subscription.FileSource; import com.yahoo.config.subscription.JarSource; import com.yahoo.config.subscription.RawSource; import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.TimingValues; import com.yahoo.vespa.config.protocol.DefContent; @@ -18,6 +19,8 @@ import java.io.File; import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Logger; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; + /** * Represents one active subscription to one config * @@ -40,31 +43,31 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { private final T config; private final Long generation; private final boolean applyOnRestart; - private final PayloadChecksum payloadChecksum; + private final PayloadChecksums payloadChecksums; private ConfigState(boolean generationChanged, Long generation, boolean applyOnRestart, boolean configChanged, T config, - PayloadChecksum payloadChecksum) { + 
PayloadChecksums payloadChecksums) { this.generationChanged = generationChanged; this.generation = generation; this.applyOnRestart = applyOnRestart; this.configChanged = configChanged; this.config = config; - this.payloadChecksum = payloadChecksum; + this.payloadChecksums = payloadChecksums; } - private ConfigState(Long generation, T config, PayloadChecksum payloadChecksum) { - this(false, generation, false, false, config, payloadChecksum); + private ConfigState(Long generation, T config, PayloadChecksums payloadChecksums) { + this(false, generation, false, false, config, payloadChecksums); } private ConfigState() { - this(false, 0L, false, false, null, PayloadChecksum.empty()); + this(false, 0L, false, false, null, PayloadChecksums.empty()); } - private ConfigState<T> createUnchanged() { return new ConfigState<>(generation, config, payloadChecksum); } + private ConfigState<T> createUnchanged() { return new ConfigState<>(generation, config, payloadChecksums); } public boolean isConfigChanged() { return configChanged; } @@ -76,7 +79,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { public T getConfig() { return config; } - public PayloadChecksum getChecksum() { return payloadChecksum; } + public PayloadChecksums getChecksums() { return payloadChecksums; } } @@ -102,6 +105,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { this.configClass = key.getConfigClass(); this.subscriber = subscriber; this.config.set(new ConfigState<>()); + getConfigState().getChecksums().removeChecksumsOfType(MD5); // TODO: Temporary until we don't use md5 anymore } /** @@ -195,8 +199,8 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { return !prev.getGeneration().equals(requiredGen) || prev.isConfigChanged(); } - void setConfig(Long generation, boolean applyOnRestart, T config, PayloadChecksum payloadChecksum) { - this.config.set(new ConfigState<>(true, generation, applyOnRestart, true, config, payloadChecksum)); + void 
setConfig(Long generation, boolean applyOnRestart, T config, PayloadChecksums payloadChecksums) { + this.config.set(new ConfigState<>(true, generation, applyOnRestart, true, config, payloadChecksums)); } /** @@ -204,22 +208,22 @@ public abstract class ConfigSubscription<T extends ConfigInstance> { */ protected void setConfigIncGen(T config) { ConfigState<T> prev = this.config.get(); - this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, prev.applyOnRestart(), true, config, prev.payloadChecksum)); + this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, prev.applyOnRestart(), true, config, prev.payloadChecksums)); } protected void setConfigIfChanged(T config) { ConfigState<T> prev = this.config.get(); - this.config.set(new ConfigState<>(true, prev.getGeneration(), prev.applyOnRestart(), !config.equals(prev.getConfig()), config, prev.payloadChecksum)); + this.config.set(new ConfigState<>(true, prev.getGeneration(), prev.applyOnRestart(), !config.equals(prev.getConfig()), config, prev.payloadChecksums)); } void setGeneration(Long generation) { ConfigState<T> prev = config.get(); - this.config.set(new ConfigState<>(true, generation, prev.applyOnRestart(), prev.isConfigChanged(), prev.getConfig(), prev.payloadChecksum)); + this.config.set(new ConfigState<>(true, generation, prev.applyOnRestart(), prev.isConfigChanged(), prev.getConfig(), prev.payloadChecksums)); } void setApplyOnRestart(boolean applyOnRestart) { ConfigState<T> prev = config.get(); - this.config.set(new ConfigState<>(prev.isGenerationChanged(), prev.getGeneration(), applyOnRestart, prev.isConfigChanged(), prev.getConfig(), prev.payloadChecksum)); + this.config.set(new ConfigState<>(prev.isGenerationChanged(), prev.getGeneration(), applyOnRestart, prev.isConfigChanged(), prev.getConfig(), prev.payloadChecksums)); } /** diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java 
b/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java index 05d4a33c02a..e9a40539bf0 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/GenericJRTConfigSubscription.java @@ -33,7 +33,7 @@ public class GenericJRTConfigSubscription extends JRTConfigSubscription<RawConfi @Override protected void setNewConfig(JRTClientConfigRequest jrtReq) { RawConfig rawConfig = RawConfig.createFromResponseParameters(jrtReq); - setConfig(jrtReq.getNewGeneration(), jrtReq.responseIsApplyOnRestart(), rawConfig, new PayloadChecksum(jrtReq.getNewConfigMd5())); + setConfig(jrtReq.getNewGeneration(), jrtReq.responseIsApplyOnRestart(), rawConfig, jrtReq.getNewChecksums()); log.log(FINE, () -> "in setNewConfig, config=" + this.getConfigState().getConfig()); } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java index b06f986555c..bb1a154d5d0 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigSubscription.java @@ -111,7 +111,7 @@ public class JRTConfigSubscription<T extends ConfigInstance> extends ConfigSubsc } catch (IllegalArgumentException e) { badConfigE = e; } - setConfig(jrtReq.getNewGeneration(), jrtReq.responseIsApplyOnRestart(), configInstance, new PayloadChecksum(jrtReq.getNewConfigMd5())); + setConfig(jrtReq.getNewGeneration(), jrtReq.responseIsApplyOnRestart(), configInstance, jrtReq.getNewChecksums()); if (badConfigE != null) { throw new IllegalArgumentException("Bad config from jrt", badConfigE); } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java index 
b7198b0c694..4a55c046f13 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/JarConfigSubscription.java @@ -9,6 +9,7 @@ import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.io.IOUtils; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksums; import java.io.IOException; import java.io.InputStreamReader; @@ -63,7 +64,7 @@ public class JarConfigSubscription<T extends ConfigInstance> extends ConfigSubsc } catch (IOException e) { throw new ConfigurationRuntimeException(e); } - setConfig(0L, false, config, PayloadChecksum.empty()); + setConfig(0L, false, config, PayloadChecksums.empty()); try { jarFile.close(); } catch (IOException e) { diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java b/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java index bed7a0fa3c4..e9e7f3e7bce 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java @@ -7,9 +7,9 @@ import com.yahoo.jrt.Supervisor; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.Connection; import com.yahoo.vespa.config.ConnectionPool; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; import com.yahoo.vespa.config.protocol.Payload; -import com.yahoo.vespa.config.util.ConfigUtils; /** * For unit testing @@ -96,7 +96,7 @@ public class MockConnection implements ConnectionPool, Connection { JRTServerConfigRequestV3 jrtReq = JRTServerConfigRequestV3.createFromRequest(request); Payload payload = Payload.from(ConfigPayload.empty()); long generation = 1; - jrtReq.addOkResponse(payload, generation, false, ConfigUtils.getMd5(payload.getData())); + jrtReq.addOkResponse(payload, 
generation, false, PayloadChecksums.fromPayload(payload)); } } diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/PayloadChecksum.java b/config/src/main/java/com/yahoo/config/subscription/impl/PayloadChecksum.java deleted file mode 100644 index 93b85aaabd0..00000000000 --- a/config/src/main/java/com/yahoo/config/subscription/impl/PayloadChecksum.java +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.config.subscription.impl; - -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Checksums of config definition payload or config payload, - * md5 and xxhash64 are the supported types at the moment. - * - * @author hmusum - */ -public class PayloadChecksum { - - private static final Pattern hexChecksumPattern = Pattern.compile("[0-9a-fA-F]+"); - - private final String checksum; - private final Type type; - - public PayloadChecksum(String checksum) { - this.checksum = checksum; - this.type = Type.MD5; - } - - public static PayloadChecksum empty() { - return new PayloadChecksum(""); - } - - public String asString() { return checksum; } - - public Type type() { return type; } - - public enum Type {MD5, XXHASH64} - - public boolean valid() { - if (checksum.equals("")) return true; // Empty checksum is ok (e.g. 
when running 'vespa-get-config') - - if (type == Type.MD5 && checksum.length() != 32) { - return false; - } else if (type == Type.XXHASH64 && checksum.length() != 16) { - return false; - } - - Matcher m = hexChecksumPattern.matcher(checksum); - return m.matches(); - } - -} diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java index 91b674da3d2..8d5e7839086 100644 --- a/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java +++ b/config/src/main/java/com/yahoo/config/subscription/impl/RawConfigSubscription.java @@ -7,6 +7,7 @@ import com.yahoo.config.subscription.ConfigInterruptedException; import com.yahoo.config.subscription.ConfigSubscriber; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksums; import java.util.Arrays; @@ -35,7 +36,7 @@ public class RawConfigSubscription<T extends ConfigInstance> extends ConfigSubsc if (payload == null) { payload = inputPayload; ConfigPayload configPayload = new CfgConfigPayloadBuilder().deserialize(Arrays.asList(payload.split("\n"))); - setConfig(0L, false, configPayload.toInstance(configClass, key.getConfigId()), PayloadChecksum.empty()); + setConfig(0L, false, configPayload.toInstance(configClass, key.getConfigId()), PayloadChecksums.empty()); return true; } try { diff --git a/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java index 4e90ce532e4..35b503416ce 100644 --- a/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java +++ b/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java @@ -44,4 +44,11 @@ public interface GetConfigRequest { */ String getRequestDefMd5(); + /** + * Returns the payload checksums from the config request. + * + * @return the payload checksums from request. 
+ */ + PayloadChecksums configPayloadChecksums(); + } diff --git a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java new file mode 100644 index 00000000000..177fb57116c --- /dev/null +++ b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java @@ -0,0 +1,91 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.config; + +import com.yahoo.text.AbstractUtf8Array; +import com.yahoo.vespa.config.protocol.Payload; +import com.yahoo.vespa.config.util.ConfigUtils; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + +/** + * Checksums of config definition payload or config payload, + * md5 and xxhash64 are the supported types at the moment. + * + * @author hmusum + */ +public class PayloadChecksum { + + private static final Pattern hexChecksumPattern = Pattern.compile("[0-9a-fA-F]+"); + + private final String checksum; + private final Type type; + + public PayloadChecksum(String checksum, Type type) { + this.checksum = checksum; + this.type = type; + } + + public static PayloadChecksum empty(Type type) { + return new PayloadChecksum("", type); + } + + public static PayloadChecksum fromPayload(Payload payload, Type type) { + switch (type) { + case MD5: return fromMd5Data(payload.getData()); + case XXHASH64: return fromXxhash64Data(payload.getData()); + default: throw new IllegalArgumentException("Unknown type " + type); + } + } + + private static PayloadChecksum fromMd5Data(AbstractUtf8Array data) { + return new PayloadChecksum(ConfigUtils.getMd5(data), MD5); + } + + private static PayloadChecksum fromXxhash64Data(AbstractUtf8Array data) { + return new PayloadChecksum(ConfigUtils.getXxhash64(data), XXHASH64); + } + + public 
boolean isEmpty() { + switch (type) { + case MD5: return this.equals(empty(MD5)); + case XXHASH64: return this.equals(empty(XXHASH64)); + default: throw new IllegalArgumentException("Unknown type " + type); + } + } + + public String asString() { return checksum; } + + public Type type() { return type; } + + public enum Type {MD5, XXHASH64} + + public boolean valid() { + if (checksum.equals("")) return true; // Empty checksum is ok (e.g. when running 'vespa-get-config') + + Matcher m = hexChecksumPattern.matcher(checksum); + return m.matches(); + } + + @Override + public int hashCode() { + return Objects.hash(checksum, type); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PayloadChecksum that = (PayloadChecksum) o; + return Objects.equals(checksum, that.checksum) && type == that.type; + } + + @Override + public String toString() { + return type.name() + ":" + checksum; + } +} diff --git a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java new file mode 100644 index 00000000000..d30e5b055bc --- /dev/null +++ b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java @@ -0,0 +1,87 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.config; + +import com.yahoo.vespa.config.protocol.Payload; +import com.yahoo.vespa.config.util.ConfigUtils; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + +/** + * Checksums for config payload, typically 1 for each PayloadChecksum type (md5 and xxhash64). + * Initialized with empty checksum for each existing type. 
+ * + * @author hmusum + */ +public class PayloadChecksums { + + private final Map<PayloadChecksum.Type, PayloadChecksum> checksums = new LinkedHashMap<>(); + + private PayloadChecksums() { + Arrays.stream(PayloadChecksum.Type.values()).forEach(type -> checksums.put(type, PayloadChecksum.empty(type))); + } + + public static PayloadChecksums empty() { return new PayloadChecksums(); } + + public static PayloadChecksums from(PayloadChecksum... checksums) { + PayloadChecksums payloadChecksums = new PayloadChecksums(); + Arrays.stream(checksums).forEach(payloadChecksums::add); + return payloadChecksums; + } + + public static PayloadChecksums from(String configMd5, String configXxhash64) { + return new PayloadChecksums() + .add(new PayloadChecksum(configMd5, MD5)) + .add(new PayloadChecksum(configXxhash64, XXHASH64)); + } + + public static PayloadChecksums fromPayload(Payload payload) { + return new PayloadChecksums() + .add(new PayloadChecksum(ConfigUtils.getMd5(payload.getData()), MD5)) + .add(new PayloadChecksum(ConfigUtils.getXxhash64(payload.getData()), XXHASH64)); + } + + private PayloadChecksums add(PayloadChecksum checksum) { + checksums.put(checksum.type(), checksum); + return this; + } + + public void removeChecksumsOfType(PayloadChecksum.Type type) { checksums.remove(type); } + + public PayloadChecksum getForType(PayloadChecksum.Type type) { + return checksums.get(type); + } + + public boolean valid() { + return checksums.values().stream().allMatch(PayloadChecksum::valid); + } + + public boolean isEmpty() { return this.equals(empty()); } + + @Override + public String toString() { + return checksums.values().stream() + .map(checksum -> checksum.type().name() + ":" + checksum.asString()) + .collect(Collectors.joining(",")); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PayloadChecksums that = (PayloadChecksums) o; + return Objects.equals(checksums, 
that.checksums); + } + + @Override + public int hashCode() { + return Objects.hash(checksums); + } + +} diff --git a/config/src/main/java/com/yahoo/vespa/config/RawConfig.java b/config/src/main/java/com/yahoo/vespa/config/RawConfig.java index 028e84e4c29..78c3fefc936 100755 --- a/config/src/main/java/com/yahoo/vespa/config/RawConfig.java +++ b/config/src/main/java/com/yahoo/vespa/config/RawConfig.java @@ -1,4 +1,4 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config; import com.yahoo.config.ConfigInstance; @@ -28,7 +28,7 @@ public class RawConfig extends ConfigInstance { private final List<String> defContent; private final Payload payload; private final int errorCode; - private final String configMd5; + private final PayloadChecksums payloadChecksums; private final Optional<VespaVersion> vespaVersion; private long generation; private boolean applyOnRestart; @@ -40,29 +40,28 @@ public class RawConfig extends ConfigInstance { * @param defMd5 The md5 sum of the .def-file. 
*/ public RawConfig(ConfigKey<?> key, String defMd5) { - this(key, defMd5, null, "", 0L, false, 0, Collections.emptyList(), Optional.empty()); + this(key, defMd5, null, PayloadChecksums.empty(), 0L, false, 0, Collections.emptyList(), Optional.empty()); } - public RawConfig(ConfigKey<?> key, String defMd5, Payload payload, String configMd5, long generation, - boolean applyOnRestart, List<String> defContent, - Optional<VespaVersion> vespaVersion) { - this(key, defMd5, payload, configMd5, generation, applyOnRestart, 0, defContent, vespaVersion); + public RawConfig(ConfigKey<?> key, String defMd5, Payload payload, PayloadChecksums payloadChecksums, long generation, + boolean applyOnRestart, List<String> defContent, Optional<VespaVersion> vespaVersion) { + this(key, defMd5, payload, payloadChecksums, generation, applyOnRestart, 0, defContent, vespaVersion); } /** Copy constructor */ public RawConfig(RawConfig rawConfig) { - this(rawConfig.key, rawConfig.defMd5, rawConfig.payload, rawConfig.configMd5, + this(rawConfig.key, rawConfig.defMd5, rawConfig.payload, rawConfig.payloadChecksums, rawConfig.generation, rawConfig.applyOnRestart, rawConfig.errorCode, rawConfig.defContent, rawConfig.getVespaVersion()); } - public RawConfig(ConfigKey<?> key, String defMd5, Payload payload, String configMd5, long generation, + public RawConfig(ConfigKey<?> key, String defMd5, Payload payload, PayloadChecksums payloadChecksums, long generation, boolean applyOnRestart, int errorCode, List<String> defContent, Optional<VespaVersion> vespaVersion) { this.key = key; this.defMd5 = ConfigUtils.getDefMd5FromRequest(defMd5, defContent); this.payload = payload; - this.configMd5 = configMd5; + this.payloadChecksums = payloadChecksums; this.generation = generation; this.applyOnRestart = applyOnRestart; this.errorCode = errorCode; @@ -79,7 +78,7 @@ public class RawConfig extends ConfigInstance { return new RawConfig(req.getConfigKey(), ConfigUtils.getDefMd5(req.getDefContent().asList()), 
req.getNewPayload(), - req.getNewConfigMd5(), + req.getNewChecksums(), req.getNewGeneration(), req.responseIsApplyOnRestart(), 0, @@ -96,7 +95,7 @@ public class RawConfig extends ConfigInstance { return new RawConfig(req.getConfigKey(), ConfigUtils.getDefMd5(req.getDefContent().asList()), Payload.from(new Utf8String(""), CompressionInfo.uncompressed()), - req.getRequestConfigMd5(), + req.getRequestConfigChecksums(), req.getRequestGeneration(), req.applyOnRestart(), 0, @@ -113,7 +112,7 @@ public class RawConfig extends ConfigInstance { public String getConfigId() { return key.getConfigId(); } - public String getConfigMd5() { return configMd5; } + public String getConfigMd5() { return payloadChecksums.getForType(PayloadChecksum.Type.MD5).asString(); } public String getDefMd5() { return defMd5; } @@ -133,6 +132,8 @@ public class RawConfig extends ConfigInstance { public Optional<VespaVersion> getVespaVersion() { return vespaVersion; } + public PayloadChecksums getPayloadChecksums() { return payloadChecksums; } + /** * Returns true if this config is equal to the config (same payload md5) in the given request. * @@ -174,11 +175,7 @@ public class RawConfig extends ConfigInstance { // while non-zero and equal error codes means configs are equal. if (isError()) return true; if (generation != other.generation) return false; - if (configMd5 != null) { - return configMd5.equals(other.configMd5); - } else { - return (other.configMd5 == null); - } + return (payloadChecksums.equals(((RawConfig) o).payloadChecksums)); } @Override @@ -194,9 +191,7 @@ public class RawConfig extends ConfigInstance { if (! isError()) { // configMd5 and generation only matter when the RawConfig is not an error. 
hash = 31 * hash + (int)(generation ^(generation >>>32)); - if (configMd5 != null) { - hash = 31 * hash + configMd5.hashCode(); - } + hash = 31 * hash + payloadChecksums.hashCode(); } return hash; } @@ -210,7 +205,7 @@ public class RawConfig extends ConfigInstance { sb.append(","); sb.append(key.getConfigId()); sb.append(","); - sb.append(getConfigMd5()); + sb.append(payloadChecksums); sb.append(","); sb.append(getGeneration()); sb.append(","); diff --git a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java index adb27f37413..345118b5fd4 100644 --- a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java +++ b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.config.benchmark; import com.yahoo.collections.Tuple2; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.io.IOUtils; import com.yahoo.jrt.Spec; import com.yahoo.jrt.Supervisor; @@ -256,7 +257,7 @@ public class LoadTester { final long serverTimeout = 1000; return JRTClientConfigRequestV3.createWithParams(fullKey, DefContent.fromList(List.of(defContent.second)), - ConfigUtils.getCanonicalHostName(), "", + ConfigUtils.getCanonicalHostName(), PayloadChecksums.empty(), 0, serverTimeout, Trace.createDummy(), compressionType, Optional.empty()); } diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java b/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java index f6fce56c227..98fc7f7a50e 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.text.AbstractUtf8Array; import java.io.IOException; @@ -36,4 +37,6 @@ public interface ConfigResponse { CompressionInfo getCompressionInfo(); + PayloadChecksums getPayloadChecksums(); + } diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequest.java index 8535cc23225..9d3b87574f3 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequest.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequest.java @@ -1,6 +1,8 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; + /** * Interface for config requests used by clients. * @@ -57,13 +59,20 @@ public interface JRTClientConfigRequest extends JRTConfigRequest { boolean responseIsApplyOnRestart(); /** - * Get the config md5 of the config returned by the server. Return an empty string if no response has been returned. + * Gets the config md5 of the config returned by the server. Returns an empty string if no response has been returned. * * @return a config md5. */ String getNewConfigMd5(); /** + * Gets the config checksums of the config returned by the server. Returns an empty string if no response has been returned. + * + * @return a config checksum. + */ + PayloadChecksums getNewChecksums(); + + /** * Test whether or not the response contains an updated config or not. * False if no response has been returned. 
* diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java index f5b558550e4..bd69c77921d 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java @@ -1,4 +1,4 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; import com.yahoo.config.ConfigInstance; @@ -12,14 +12,20 @@ import com.yahoo.text.Utf8; import com.yahoo.text.Utf8Array; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.JRTMethods; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.util.ConfigUtils; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Optional; +import java.util.logging.Level; import java.util.logging.Logger; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + /** * Represents version 3 config request for config clients. Provides methods for inspecting request and response * values. 
@@ -38,7 +44,7 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { protected JRTClientConfigRequestV3(ConfigKey<?> key, String hostname, DefContent defSchema, - String configMd5, + PayloadChecksums payloadChecksums, long generation, long timeout, Trace trace, @@ -47,7 +53,7 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { Slime data = SlimeRequestData.encodeRequest(key, hostname, defSchema, - configMd5, + payloadChecksums, generation, timeout, trace, @@ -97,7 +103,7 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { return new JRTClientConfigRequestV3(getConfigKey(), getClientHostName(), getDefContent(), - isError() ? getRequestConfigMd5() : newConfMd5(), + isError() ? getRequestConfigChecksums() : newConfigChecksums(), isError() ? getRequestGeneration() : newGen(), timeout, Trace.createNew(), @@ -113,7 +119,7 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { return createWithParams(sub.getKey(), sub.getDefContent(), ConfigUtils.getCanonicalHostName(), - configState.getChecksum().asString(), + configState.getChecksums(), configState.getGeneration(), sub.timingValues().getSubscribeTimeout(), trace, @@ -128,34 +134,34 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { Optional<VespaVersion> vespaVersion) { String hostname = ConfigUtils.getCanonicalHostName(); return createWithParams(config.getKey(), - DefContent.fromList(config.getDefContent()), - hostname, - config.getConfigMd5(), - config.getGeneration(), - serverTimeout, - trace, - compressionType, - vespaVersion); + DefContent.fromList(config.getDefContent()), + hostname, + config.getPayloadChecksums(), + config.getGeneration(), + serverTimeout, + trace, + compressionType, + vespaVersion); } public static JRTClientConfigRequest createWithParams(ConfigKey<?> reqKey, DefContent defContent, String hostname, - String configMd5, + PayloadChecksums payloadChecksums, long generation, long 
serverTimeout, Trace trace, CompressionType compressionType, Optional<VespaVersion> vespaVersion) { return new JRTClientConfigRequestV3(reqKey, - hostname, - defContent, - configMd5, - generation, - serverTimeout, - trace, - compressionType, - vespaVersion); + hostname, + defContent, + payloadChecksums, + generation, + serverTimeout, + trace, + compressionType, + vespaVersion); } @Override @@ -177,7 +183,7 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { .append(",").append(getTimeout()) .append(",").append(getVespaVersion().map(VespaVersion::toString).orElse("")) .append("'\n"); - sb.append("response='").append(getNewConfigMd5()) + sb.append("response='").append(getNewChecksums()) .append(",").append(getNewGeneration()) .append(",").append(responseIsApplyOnRestart()) .append("'\n"); @@ -221,6 +227,12 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { return requestData.getTimeout(); } + protected PayloadChecksums newConfigChecksums() { + PayloadChecksums newChecksum = getNewChecksums(); + if (PayloadChecksums.empty().equals(newChecksum)) return getRequestConfigChecksums(); + return newChecksum; + } + protected String newConfMd5() { String newMd5 = getNewConfigMd5(); if ("".equals(newMd5)) return getRequestConfigMd5(); @@ -245,8 +257,20 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { @Override public boolean hasUpdatedConfig() { - String respMd5 = getNewConfigMd5(); - return !respMd5.equals("") && !getRequestConfigMd5().equals(respMd5); + PayloadChecksums requestConfigChecksums = getRequestConfigChecksums(); + log.log(Level.FINE, () -> "request checksums for " + getConfigKey() + ":" + requestConfigChecksums); + + PayloadChecksums newChecksums = getNewChecksums(); + log.log(Level.FINE, () -> "new checksums for " + getConfigKey() + ": " + newChecksums); + if (newChecksums.isEmpty()) return false; + + PayloadChecksum respMd5 = newChecksums.getForType(MD5); + boolean updated = respMd5 != 
null && ! requestConfigChecksums.getForType(MD5).equals(respMd5); + + if (updated) return true; + + PayloadChecksum respXxhash64 = newChecksums.getForType(XXHASH64); + return respXxhash64 != null && ! requestConfigChecksums.getForType(XXHASH64).equals(respXxhash64); } @Override @@ -264,6 +288,10 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { return requestData.getRequestDefMd5(); } + public PayloadChecksums getRequestConfigChecksums() { + return requestData.getRequestConfigChecksums(); + } + @Override public boolean validateResponse() { if (request.isError()) { @@ -285,7 +313,12 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest { @Override public String getNewConfigMd5() { - return responseData.getResponseConfigMd5(); + return responseData.getResponseConfigMd5().asString(); + } + + @Override + public PayloadChecksums getNewChecksums() { + return responseData.getResponseConfigChecksums(); } @Override diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTConfigRequest.java index 5b8f040b8e3..0fc751dc49f 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTConfigRequest.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTConfigRequest.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.vespa.config.ConfigKey; @@ -44,6 +45,14 @@ public interface JRTConfigRequest { /** * Returns the generation of the requested config. If none has been given, 0 should be returned. + * Returns the checksum of the config request. Return an empty string if no response has been returned. + * + * @return a config checksum. 
+ */ + PayloadChecksums getRequestConfigChecksums(); + + /** + * Returns the generation of the requested config. If none has been given, 0 should be returned. + * * @return the generation in the request. */ diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java index abc2b0b4473..938da855014 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.GetConfigRequest; /** @@ -34,9 +35,9 @@ public interface JRTServerConfigRequest extends JRTConfigRequest, GetConfigReque * @param generation The config generation of the given payload. * @param applyOnRestart true if this config should only be applied on the next restart, * false if it should be applied right away - * @param configMd5 The md5sum of the given payload. + * @param payloadChecksums checksums of the given payload. */ - void addOkResponse(Payload payload, long generation, boolean applyOnRestart, String configMd5); + void addOkResponse(Payload payload, long generation, boolean applyOnRestart, PayloadChecksums payloadChecksums); /** * Get the current config md5 of the client config. @@ -84,4 +85,12 @@ public interface JRTServerConfigRequest extends JRTConfigRequest, GetConfigReque */ Payload payloadFromResponse(ConfigResponse response); + + /** + * Returns the payload checksums from the config request. + * + * @return the payload checksums from request. 
+ */ + PayloadChecksums configPayloadChecksums(); + } diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java index e0a5b23a6d4..13d0ca1119a 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.config.protocol; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.DataValue; import com.yahoo.jrt.Request; import com.yahoo.jrt.StringValue; @@ -18,6 +19,9 @@ import java.nio.ByteBuffer; import java.util.Optional; import java.util.logging.Logger; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + /** * The V3 config protocol implemented on the server side. 
The V3 protocol uses 2 fields: * @@ -68,9 +72,9 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest { } @Override - public void addOkResponse(Payload payload, long generation, boolean applyOnRestart, String configMd5) { + public void addOkResponse(Payload payload, long generation, boolean applyOnRestart, PayloadChecksums payloadChecksums) { this.applyOnRestart = applyOnRestart; - boolean changedConfig = !configMd5.equals(getRequestConfigMd5()); + boolean changedConfig = !payloadChecksums.equals(getRequestConfigChecksums()); boolean changedConfigAndNewGeneration = changedConfig && ConfigUtils.isGenerationNewer(generation, getRequestGeneration()); Payload responsePayload = payload.withCompression(getCompressionType()); ByteArrayOutputStream byteArrayOutputStream = new NoCopyByteArrayOutputStream(4096); @@ -78,7 +82,8 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest { JsonGenerator jsonGenerator = createJsonGenerator(byteArrayOutputStream); jsonGenerator.writeStartObject(); addCommonReturnValues(jsonGenerator); - setResponseField(jsonGenerator, SlimeResponseData.RESPONSE_CONFIG_MD5, configMd5); + setResponseField(jsonGenerator, SlimeResponseData.RESPONSE_CONFIG_MD5, payloadChecksums.getForType(MD5).asString()); + setResponseField(jsonGenerator, SlimeResponseData.RESPONSE_CONFIG_XXHASH64, payloadChecksums.getForType(XXHASH64).asString()); setResponseField(jsonGenerator, SlimeResponseData.RESPONSE_CONFIG_GENERATION, generation); setResponseField(jsonGenerator, SlimeResponseData.RESPONSE_APPLY_ON_RESTART, applyOnRestart); jsonGenerator.writeObjectFieldStart(SlimeResponseData.RESPONSE_COMPRESSION_INFO); @@ -194,6 +199,8 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest { @Override public String getRequestDefMd5() { return requestData.getRequestDefMd5(); } + public PayloadChecksums getRequestConfigChecksums() { return requestData.getRequestConfigChecksums(); } + private void addErrorResponse(int 
errorCode) { addErrorResponse(errorCode, ErrorCode.getName(errorCode)); } @@ -266,7 +273,9 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest { } @Override - public Optional<VespaVersion> getVespaVersion() { - return requestData.getVespaVersion(); - } + public Optional<VespaVersion> getVespaVersion() { return requestData.getVespaVersion(); } + + @Override + public PayloadChecksums configPayloadChecksums() { return requestData.getRequestConfigChecksums(); } + } diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/RequestValidation.java b/config/src/main/java/com/yahoo/vespa/config/protocol/RequestValidation.java index 7db15844e8b..5737dc112e0 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/RequestValidation.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/RequestValidation.java @@ -1,15 +1,16 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; -import com.yahoo.config.subscription.impl.PayloadChecksum; import com.yahoo.vespa.config.ConfigDefinition; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ErrorCode; +import com.yahoo.vespa.config.PayloadChecksum; import java.util.logging.Logger; import java.util.regex.Matcher; -import static java.util.logging.Level.INFO; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static java.util.logging.Level.WARNING; /** * Static utility methods for verifying common request properties. 
@@ -22,31 +23,31 @@ public class RequestValidation { public static int validateRequest(JRTConfigRequest request) { ConfigKey<?> key = request.getConfigKey(); if (!RequestValidation.verifyName(key.getName())) { - log.log(INFO, "Illegal name '" + key.getName() + "'"); + log.log(WARNING, "Illegal name '" + key.getName() + "'"); return ErrorCode.ILLEGAL_NAME; } if (!RequestValidation.verifyNamespace(key.getNamespace())) { - log.log(INFO, "Illegal name space '" + key.getNamespace() + "'"); + log.log(WARNING, "Illegal name space '" + key.getNamespace() + "'"); return ErrorCode.ILLEGAL_NAME_SPACE; } - if (!(new PayloadChecksum(request.getRequestDefMd5()).valid())) { - log.log(INFO, "Illegal checksum '" + key.getNamespace() + "'"); + if (!(new PayloadChecksum(request.getRequestDefMd5(), MD5).valid())) { + log.log(WARNING, "Illegal checksum '" + key.getNamespace() + "'"); return ErrorCode.ILLEGAL_DEF_MD5; // TODO: Use ILLEGAL_DEF_CHECKSUM } - if (!new PayloadChecksum(request.getRequestConfigMd5()).valid()) { - log.log(INFO, "Illegal config checksum '" + request.getRequestConfigMd5() + "'"); + if (! 
request.getRequestConfigChecksums().valid()) { + log.log(WARNING, "Illegal config checksum '" + request.getRequestConfigChecksums() + "'"); return ErrorCode.ILLEGAL_CONFIG_MD5; // TODO: Use ILLEGAL_CONFIG_CHECKSUM } if (!RequestValidation.verifyGeneration(request.getRequestGeneration())) { - log.log(INFO, "Illegal generation '" + request.getRequestGeneration() + "'"); + log.log(WARNING, "Illegal generation '" + request.getRequestGeneration() + "'"); return ErrorCode.ILLEGAL_GENERATION; } if (!RequestValidation.verifyTimeout(request.getTimeout())) { - log.log(INFO, "Illegal timeout '" + request.getTimeout() + "'"); + log.log(WARNING, "Illegal timeout '" + request.getTimeout() + "'"); return ErrorCode.ILLEGAL_TIMEOUT; } if (!RequestValidation.verifyHostname(request.getClientHostName())) { - log.log(INFO, "Illegal client host name '" + request.getClientHostName() + "'"); + log.log(WARNING, "Illegal client host name '" + request.getClientHostName() + "'"); return ErrorCode.ILLEGAL_CLIENT_HOSTNAME; } return 0; diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java index 1ccf6e367fc..8d08717942b 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.text.AbstractUtf8Array; import com.yahoo.vespa.config.ConfigPayload; @@ -8,6 +9,8 @@ import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; + /** * Class for serializing config responses based on {@link com.yahoo.slime.Slime} implementing the {@link ConfigResponse} interface. * @@ -19,25 +22,42 @@ public class SlimeConfigResponse implements ConfigResponse { private final CompressionInfo compressionInfo; private final long generation; private final boolean applyOnRestart; - private final String configMd5; + private final PayloadChecksums payloadChecksums; + + public static SlimeConfigResponse fromConfigPayload(ConfigPayload payload, + long generation, + boolean applyOnRestart, + PayloadChecksums payloadChecksums) { + AbstractUtf8Array data = payload.toUtf8Array(true); + return new SlimeConfigResponse(data, + generation, + applyOnRestart, + payloadChecksums, + CompressionInfo.create(CompressionType.UNCOMPRESSED, data.getByteLength())); + } - public static SlimeConfigResponse fromConfigPayload(ConfigPayload payload, long generation, - boolean applyOnRestart, String configMd5) { + // TODO: Legacy method, remove when not used anymore + public static SlimeConfigResponse fromConfigPayload(ConfigPayload payload, + long generation, + boolean applyOnRestart, + String configMd5) { AbstractUtf8Array data = payload.toUtf8Array(true); - return new SlimeConfigResponse(data, generation, applyOnRestart, - configMd5, + return new SlimeConfigResponse(data, + generation, + applyOnRestart, + PayloadChecksums.from(configMd5, ""), CompressionInfo.create(CompressionType.UNCOMPRESSED, data.getByteLength())); } public SlimeConfigResponse(AbstractUtf8Array payload, long generation, boolean applyOnRestart, - String configMd5, + PayloadChecksums payloadChecksums, CompressionInfo compressionInfo) 
{ this.payload = payload; this.generation = generation; this.applyOnRestart = applyOnRestart; - this.configMd5 = configMd5; + this.payloadChecksums = payloadChecksums; this.compressionInfo = compressionInfo; } @@ -56,7 +76,7 @@ public class SlimeConfigResponse implements ConfigResponse { @Override public String getConfigMd5() { - return configMd5; + return payloadChecksums.getForType(MD5).asString(); } @Override @@ -68,11 +88,13 @@ public class SlimeConfigResponse implements ConfigResponse { @Override public String toString() { return "generation=" + generation + "\n" + - "configmd5=" + configMd5 + "\n" + + "checksums=" + payloadChecksums + "\n" + Payload.from(payload, compressionInfo).withCompression(CompressionType.UNCOMPRESSED); } @Override public CompressionInfo getCompressionInfo() { return compressionInfo; } + @Override + public PayloadChecksums getPayloadChecksums() { return payloadChecksums; } } diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java index b885623a78b..2b69ae4ebeb 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java @@ -7,10 +7,15 @@ import com.yahoo.slime.Inspector; import com.yahoo.slime.Slime; import com.yahoo.slime.SlimeUtils; import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.util.ConfigUtils; import java.util.Optional; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + /** * Contains slime request data objects. Provides methods for reading various fields from slime request data. * All data is read lazily. 
@@ -27,6 +32,7 @@ class SlimeRequestData { private static final String REQUEST_CLIENT_HOSTNAME = "clientHostname"; private static final String REQUEST_CURRENT_GENERATION = "currentGeneration"; private static final String REQUEST_CONFIG_MD5 = "configMD5"; + private static final String REQUEST_CONFIG_XXHASH64 = "configXxhash64"; private static final String REQUEST_TRACE = "trace"; private static final String REQUEST_TIMEOUT = "timeout"; private static final String REQUEST_DEF_MD5 = "defMD5"; @@ -79,6 +85,17 @@ class SlimeRequestData { String getRequestDefMd5() { return getRequestField(REQUEST_DEF_MD5).asString(); } + PayloadChecksum getRequestConfigXxhash64() { + Inspector xxhash64Field = getRequestField(REQUEST_CONFIG_XXHASH64); + return xxhash64Field.valid() + ? new PayloadChecksum(xxhash64Field.asString(), XXHASH64) + : PayloadChecksum.empty(XXHASH64); + } + + PayloadChecksums getRequestConfigChecksums() { + return PayloadChecksums.from(getRequestConfigMd5(), getRequestConfigXxhash64().asString()); + } + long getRequestGeneration() { return getRequestField(REQUEST_CURRENT_GENERATION).asLong(); } @@ -86,7 +103,7 @@ class SlimeRequestData { static Slime encodeRequest(ConfigKey<?> key, String hostname, DefContent defSchema, - String configMd5, + PayloadChecksums payloadChecksums, long generation, long timeout, Trace trace, @@ -102,7 +119,10 @@ class SlimeRequestData { request.setString(REQUEST_CLIENT_CONFIGID, key.getConfigId()); request.setString(REQUEST_CLIENT_HOSTNAME, hostname); defSchema.serialize(request.setArray(REQUEST_DEF_CONTENT)); - request.setString(REQUEST_CONFIG_MD5, configMd5); + if (payloadChecksums.getForType(XXHASH64) != null) + request.setString(REQUEST_CONFIG_XXHASH64, payloadChecksums.getForType(XXHASH64).asString()); + if (payloadChecksums.getForType(MD5) != null) + request.setString(REQUEST_CONFIG_MD5, payloadChecksums.getForType(MD5).asString()); request.setLong(REQUEST_CURRENT_GENERATION, generation); request.setLong(REQUEST_TIMEOUT, 
timeout); request.setString(REQUEST_COMPRESSION_TYPE, compressionType.name()); diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeResponseData.java b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeResponseData.java index cc98587456c..ca519fd7061 100644 --- a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeResponseData.java +++ b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeResponseData.java @@ -1,11 +1,16 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.slime.Inspector; import com.yahoo.slime.Slime; import com.yahoo.slime.SlimeUtils; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; + /** * Contains response data for a slime response and methods for decoding the response data that * are common to all {@link Slime} based config requests. @@ -22,6 +27,7 @@ class SlimeResponseData { static final String RESPONSE_CLIENT_HOSTNAME = "clientHostname"; static final String RESPONSE_TRACE = "trace"; static final String RESPONSE_CONFIG_MD5 = "configMD5"; + static final String RESPONSE_CONFIG_XXHASH64 = "configXxhash64"; static final String RESPONSE_CONFIG_GENERATION = "generation"; static final String RESPONSE_APPLY_ON_RESTART = "applyOnRestart"; static final String RESPONSE_COMPRESSION_INFO = "compressionInfo"; @@ -58,9 +64,22 @@ class SlimeResponseData { return trace.valid() ? Trace.fromSlime(trace) : Trace.createDummy(); } - String getResponseConfigMd5() { - Inspector inspector = getResponseField(RESPONSE_CONFIG_MD5); - return inspector.valid() ? 
inspector.asString() : ""; + PayloadChecksum getResponseConfigMd5() { + Inspector md5Field = getResponseField(RESPONSE_CONFIG_MD5); + return md5Field.valid() + ? new PayloadChecksum(md5Field.asString(), MD5) + : PayloadChecksum.empty(MD5); + } + + PayloadChecksum getResponseConfigXxhash64() { + Inspector xxhash64Field = getResponseField(RESPONSE_CONFIG_XXHASH64); + return xxhash64Field.valid() + ? new PayloadChecksum(xxhash64Field.asString(), XXHASH64) + : PayloadChecksum.empty(XXHASH64); + } + + PayloadChecksums getResponseConfigChecksums() { + return PayloadChecksums.from(getResponseConfigMd5(), getResponseConfigXxhash64()); } CompressionInfo getCompressionInfo() { diff --git a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java index a7fc8afcad9..329661bf7ae 100644 --- a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java +++ b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java @@ -10,6 +10,8 @@ import com.yahoo.text.AbstractUtf8Array; import com.yahoo.text.Utf8; import com.yahoo.vespa.config.ConfigDefinitionKey; import com.yahoo.vespa.config.ConfigPayload; +import net.jpountz.xxhash.XXHash64; +import net.jpountz.xxhash.XXHashFactory; import java.io.ByteArrayOutputStream; import java.io.File; @@ -94,6 +96,15 @@ public class ConfigUtils { } } + public static String getXxhash64(AbstractUtf8Array input) { + return getXxhash64(input.wrap()); + } + + public static String getXxhash64(ByteBuffer input) { + XXHash64 hasher = XXHashFactory.fastestInstance().hash64(); + return Long.toHexString(hasher.hash(input, 0)).toLowerCase(); + } + /** * Replaces sequences of spaces with 1 space, unless inside quotes. 
Public for testing; * diff --git a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java index 7bdaeb7d367..919155a3944 100644 --- a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java @@ -9,6 +9,7 @@ import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConnectionPool; import com.yahoo.vespa.config.ErrorCode; import com.yahoo.vespa.config.ErrorType; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.TimingValues; import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; import org.junit.Test; @@ -139,7 +140,7 @@ public class JRTConfigRequesterTest { ConfigSubscriber subscriber = new ConfigSubscriber(); final TimingValues timingValues = getTestTimingValues(); JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); - sub.setConfig(1L, false, config(), PayloadChecksum.empty()); + sub.setConfig(1L, false, config(), PayloadChecksums.empty()); final MockConnection connection = new MockConnection(new ErrorResponseHandler()); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); @@ -165,7 +166,7 @@ public class JRTConfigRequesterTest { ConfigSubscriber subscriber = new ConfigSubscriber(); final TimingValues timingValues = getTestTimingValues(); JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); - sub.setConfig(1L, false, config(), PayloadChecksum.empty()); + sub.setConfig(1L, false, config(), PayloadChecksums.empty()); final MockConnection connection = new MockConnection(new ErrorResponseHandler(com.yahoo.jrt.ErrorCode.TIMEOUT)); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); @@ -179,7 +180,7 @@ public class JRTConfigRequesterTest { ConfigSubscriber 
subscriber = new ConfigSubscriber(); final TimingValues timingValues = getTestTimingValues(); JRTConfigSubscription<SimpletypesConfig> sub = createSubscription(subscriber, timingValues); - sub.setConfig(1L, false, config(), PayloadChecksum.empty()); + sub.setConfig(1L, false, config(), PayloadChecksums.empty()); final MockConnection connection = new MockConnection(new ErrorResponseHandler(ErrorCode.UNKNOWN_DEFINITION)); JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues); diff --git a/config/src/test/java/com/yahoo/vespa/config/RawConfigTest.java b/config/src/test/java/com/yahoo/vespa/config/RawConfigTest.java index e1d11f82eea..96a2d976f51 100644 --- a/config/src/test/java/com/yahoo/vespa/config/RawConfigTest.java +++ b/config/src/test/java/com/yahoo/vespa/config/RawConfigTest.java @@ -1,8 +1,9 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config; import com.yahoo.text.Utf8String; -import com.yahoo.vespa.config.protocol.*; +import com.yahoo.vespa.config.protocol.CompressionInfo; +import com.yahoo.vespa.config.protocol.Payload; import com.yahoo.vespa.config.protocol.VespaVersion; import com.yahoo.vespa.config.util.ConfigUtils; import org.junit.Test; @@ -11,6 +12,8 @@ import java.util.Arrays; import java.util.List; import java.util.Optional; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertEquals; @@ -26,11 +29,11 @@ import static org.junit.Assert.assertThat; public class RawConfigTest { private static final ConfigKey<?> key = new ConfigKey<>("foo", "id", "bar"); - private static List<String> defContent = Arrays.asList("version=1", "anInt int"); + private static final List<String> defContent = Arrays.asList("version=1", "anInt int"); private static final String defMd5 = ConfigUtils.getDefMd5FromRequest("", defContent); - private static final String configMd5 = "012345"; - private static Payload payload = Payload.from(new Utf8String("anInt 1"), CompressionInfo.uncompressed()); - private static long generation = 1L; + private static final PayloadChecksums payloadChecksums = PayloadChecksums.from("012345", ""); + private static final Payload payload = Payload.from(new Utf8String("anInt 1"), CompressionInfo.uncompressed()); + private static final long generation = 1L; @Test public void basic() { @@ -47,7 +50,7 @@ public class RawConfigTest { RawConfig copiedConfig = new RawConfig(config); assertEquals(config, copiedConfig); - assertEquals("bar.foo," + defMd5 + ",id,,0,null", config.toString()); + assertEquals("bar.foo," + defMd5 + ",id,MD5:,XXHASH64:,0,null", config.toString()); assertEquals(Optional.empty(), config.getVespaVersion()); } @@ -61,14 +64,14 @@ public class 
RawConfigTest { assertThat(config.hashCode(), is(not(new RawConfig(key, "a").hashCode()))); // different def md5 // different generation - config = new RawConfig(key, defMd5, payload, configMd5, generation, false, defContent, Optional.empty()); - RawConfig config2 = new RawConfig(key, defMd5, payload, configMd5, 2L, false, defContent, Optional.empty()); + config = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, defContent, Optional.empty()); + RawConfig config2 = new RawConfig(key, defMd5, payload, payloadChecksums, 2L, false, defContent, Optional.empty()); assertThat(config, is(not(config2))); assertThat(config.hashCode(), is(not(config2.hashCode()))); // different config md5 and with vespa version final VespaVersion vespaVersion = VespaVersion.fromString("5.37.38"); - RawConfig config3 = new RawConfig(key, defMd5, payload, "9999", generation, false, defContent, Optional.of(vespaVersion)); + RawConfig config3 = new RawConfig(key, defMd5, payload, PayloadChecksums.from("9999", ""), generation, false, defContent, Optional.of(vespaVersion)); assertThat(config, is(not(config3))); assertThat(config.hashCode(), is(not(config3.hashCode()))); // Check that vespa version is set correctly @@ -82,42 +85,43 @@ public class RawConfigTest { assertNotEquals(config, key); // errors - RawConfig errorConfig1 = new RawConfig(key, defMd5, payload, configMd5, generation, false, 1, defContent, Optional.empty()); + RawConfig errorConfig1 = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, 1, defContent, Optional.empty()); assertThat(errorConfig1, is(errorConfig1)); assertThat(config, is(not(errorConfig1))); assertThat(config.hashCode(), is(not(errorConfig1.hashCode()))); assertThat(errorConfig1, is(errorConfig1)); - RawConfig errorConfig2 = new RawConfig(key, defMd5, payload, configMd5, generation, false, 2, defContent, Optional.empty()); + RawConfig errorConfig2 = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, 2, 
defContent, Optional.empty()); assertThat(errorConfig1, is(not(errorConfig2))); assertThat(errorConfig1.hashCode(), is(not(errorConfig2.hashCode()))); } @Test public void payload() { - RawConfig config = new RawConfig(key, defMd5, payload, configMd5, generation, false, defContent, Optional.empty()); - assertThat(config.getPayload(), is(payload)); - assertThat(config.getConfigMd5(), is(configMd5)); - assertThat(config.getGeneration(), is(generation)); - assertThat(config.getDefContent(), is(defContent)); + RawConfig config = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, defContent, Optional.empty()); + assertEquals(config.getPayload(), payload); + assertEquals(config.getConfigMd5(), payloadChecksums.getForType(MD5).asString()); + assertEquals(config.getPayloadChecksums().getForType(XXHASH64), payloadChecksums.getForType(XXHASH64)); + assertEquals(config.getGeneration(), generation); + assertEquals(config.getDefContent(), defContent); } @Test public void require_correct_defmd5() { final String defMd5ForEmptyDefContent = "d41d8cd98f00b204e9800998ecf8427e"; - RawConfig config = new RawConfig(key, null, payload, configMd5, generation, false, defContent, Optional.empty()); + RawConfig config = new RawConfig(key, null, payload, payloadChecksums, generation, false, defContent, Optional.empty()); assertThat(config.getDefMd5(), is(defMd5)); - config = new RawConfig(key, "", payload, configMd5, generation, false, defContent, Optional.empty()); + config = new RawConfig(key, "", payload, payloadChecksums, generation, false, defContent, Optional.empty()); assertThat(config.getDefMd5(), is(defMd5)); - config = new RawConfig(key, defMd5, payload, configMd5, generation, false, defContent, Optional.empty()); + config = new RawConfig(key, defMd5, payload, payloadChecksums, generation, false, defContent, Optional.empty()); assertThat(config.getDefMd5(), is(defMd5)); - config = new RawConfig(key, null, payload, configMd5, generation, false, null, 
Optional.empty()); + config = new RawConfig(key, null, payload, payloadChecksums, generation, false, null, Optional.empty()); assertNull(config.getDefMd5()); - config = new RawConfig(key, null, payload, configMd5, generation, false, List.of(""), Optional.empty()); + config = new RawConfig(key, null, payload, payloadChecksums, generation, false, List.of(""), Optional.empty()); assertThat(config.getDefMd5(), is(defMd5ForEmptyDefContent)); - config = new RawConfig(key, "", payload, configMd5, generation, false, null, Optional.empty()); + config = new RawConfig(key, "", payload, payloadChecksums, generation, false, null, Optional.empty()); assertThat(config.getDefMd5(), is("")); - config = new RawConfig(key, "", payload, configMd5, generation, false, List.of(""), Optional.empty()); + config = new RawConfig(key, "", payload, payloadChecksums, generation, false, List.of(""), Optional.empty()); assertThat(config.getDefMd5(), is(defMd5ForEmptyDefContent)); } diff --git a/config/src/test/java/com/yahoo/vespa/config/RequestValidationTest.java b/config/src/test/java/com/yahoo/vespa/config/RequestValidationTest.java index 8c11db15f7c..8e0c0d1671e 100644 --- a/config/src/test/java/com/yahoo/vespa/config/RequestValidationTest.java +++ b/config/src/test/java/com/yahoo/vespa/config/RequestValidationTest.java @@ -1,10 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config; -import com.yahoo.config.subscription.impl.PayloadChecksum; import com.yahoo.vespa.config.protocol.RequestValidation; import org.junit.Test; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -21,11 +22,11 @@ public class RequestValidationTest { @Test public void testVerifyDefMd5() { - assertTrue(PayloadChecksum.empty().valid()); - assertTrue(new PayloadChecksum("e8f0c01c7c3dcb8d3f62d7ff777fce6b").valid()); - assertTrue(new PayloadChecksum("e8f0c01c7c3dcb8d3f62d7ff777fce6B").valid()); - assertFalse(new PayloadChecksum("aaaaaaaaaaaaaaaaaa").valid()); - assertFalse(new PayloadChecksum("-8f0c01c7c3dcb8d3f62d7ff777fce6b").valid()); + assertTrue(PayloadChecksum.empty(MD5).valid()); + assertTrue(new PayloadChecksum("e8f0c01c7c3dcb8d3f62d7ff777fce6b", MD5).valid()); + assertTrue(new PayloadChecksum("e8f0c01c7c3dcb8d3f62d7ff777fce6B", MD5).valid()); + assertTrue(new PayloadChecksum("e8f0c01c7c3dcb8d", XXHASH64).valid()); + assertFalse(new PayloadChecksum("-8f0c01c7c3dcb8d3f62d7ff777fce6b", MD5).valid()); } @Test diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java b/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java index c53a6b5c73d..f00e95ccea2 100644 --- a/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java +++ b/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java @@ -1,6 +1,7 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.protocol; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.foo.SimpletypesConfig; import com.yahoo.text.AbstractUtf8Array; import com.yahoo.text.Utf8Array; @@ -12,6 +13,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -23,14 +26,20 @@ public class ConfigResponseTest { @Test public void require_that_slime_response_is_initialized() throws IOException { ConfigPayload configPayload = ConfigPayload.fromInstance(new SimpletypesConfig(new SimpletypesConfig.Builder())); - ConfigResponse response = SlimeConfigResponse.fromConfigPayload(configPayload, 3, false, "mymd5"); + PayloadChecksums payloadChecksums = PayloadChecksums.fromPayload(Payload.from(configPayload)); + ConfigResponse response = + SlimeConfigResponse.fromConfigPayload(configPayload, + 3, + false, + payloadChecksums); ByteArrayOutputStream baos = new ByteArrayOutputStream(); response.serialize(baos, CompressionType.UNCOMPRESSED); String payload = baos.toString(StandardCharsets.UTF_8); assertNotNull(payload); - assertEquals("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}", payload.toString()); - assertEquals(response.getGeneration(), 3L); - assertEquals(response.getConfigMd5(), "mymd5"); + assertEquals("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}", payload); + assertEquals(3L, response.getGeneration()); + assertEquals(payloadChecksums.getForType(MD5), response.getPayloadChecksums().getForType(MD5)); + assertEquals(payloadChecksums.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64)); baos = new ByteArrayOutputStream(); 
response.serialize(baos, CompressionType.UNCOMPRESSED); @@ -42,7 +51,7 @@ public class ConfigResponseTest { ConfigPayload configPayload = ConfigPayload.fromInstance(new SimpletypesConfig(new SimpletypesConfig.Builder())); AbstractUtf8Array data = configPayload.toUtf8Array(true); Utf8Array bytes = new Utf8Array(new LZ4PayloadCompressor().compress(data.wrap())); - ConfigResponse response = new SlimeConfigResponse(bytes, 3, false, "mymd5", CompressionInfo.create(CompressionType.LZ4, data.getByteLength())); + ConfigResponse response = new SlimeConfigResponse(bytes, 3, false, PayloadChecksums.empty(), CompressionInfo.create(CompressionType.LZ4, data.getByteLength())); ByteArrayOutputStream baos = new ByteArrayOutputStream(); response.serialize(baos, CompressionType.UNCOMPRESSED); String payload = baos.toString(StandardCharsets.UTF_8); diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java index d6ce246aa1f..b1ed3a089ae 100644 --- a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java +++ b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java @@ -17,6 +17,7 @@ import com.yahoo.test.ManualClock; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.ErrorCode; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.RawConfig; import com.yahoo.vespa.config.TimingValues; import com.yahoo.vespa.config.util.ConfigUtils; @@ -27,6 +28,8 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -53,7 +56,7 @@ public class 
JRTConfigRequestV3Test { private final long currentGeneration = 3; private final long timeout = 5000; private Trace trace ; - private final String configMd5 = ConfigUtils.getMd5(createPayload().getData()); + private final PayloadChecksums payloadChecksums = PayloadChecksums.fromPayload(createPayload()); private JRTClientConfigRequest clientReq; private JRTServerConfigRequest serverReq; @@ -79,8 +82,12 @@ public class JRTConfigRequestV3Test { @Test public void emptypayload() { ConfigPayload payload = ConfigPayload.empty(); - SlimeConfigResponse response = SlimeConfigResponse.fromConfigPayload(payload, 0, false, ConfigUtils.getMd5(payload)); - serverReq.addOkResponse(serverReq.payloadFromResponse(response), response.getGeneration(), false, response.getConfigMd5()); + PayloadChecksums payloadChecksums = PayloadChecksums.fromPayload(Payload.from(payload)); + SlimeConfigResponse response = SlimeConfigResponse.fromConfigPayload(payload, + 0, + false, + payloadChecksums); + serverReq.addOkResponse(serverReq.payloadFromResponse(response), response.getGeneration(), false, payloadChecksums); assertTrue(clientReq.validateResponse()); assertTrue(clientReq.hasUpdatedGeneration()); assertEquals("{}", clientReq.getNewPayload().withCompression(CompressionType.UNCOMPRESSED).getData().toString()); @@ -97,7 +104,8 @@ public class JRTConfigRequestV3Test { @Test public void next_request_when_error_is_correct() { - serverReq.addOkResponse(createPayload(), 999999, false, "newmd5"); + Payload payload = createPayload(); + serverReq.addOkResponse(payload, 999999, false, PayloadChecksums.fromPayload(payload)); serverReq.addErrorResponse(ErrorCode.OUTDATED_CONFIG, "error message"); JRTClientConfigRequest next = clientReq.nextRequest(6); // Should use config md5 and generation from the request, not the response @@ -111,7 +119,7 @@ public class JRTConfigRequestV3Test { Payload payload = createPayload("vale"); String md5 = ConfigUtils.getMd5(payload.getData()); long generation = 4L; - 
serverReq.addOkResponse(payload, generation, false, md5); + serverReq.addOkResponse(payload, generation, false, PayloadChecksums.fromPayload(payload)); assertTrue(clientReq.validateResponse()); assertThat(clientReq.getNewPayload().withCompression(CompressionType.UNCOMPRESSED).getData().toString(), is(payload.getData().toString())); assertThat(clientReq.getNewGeneration(), is(4L)); @@ -137,7 +145,7 @@ public class JRTConfigRequestV3Test { @Test public void generation_only_is_updated() { Payload payload = createPayload(); - serverReq.addOkResponse(payload, 4L, false, ConfigUtils.getMd5(payload.getData())); + serverReq.addOkResponse(payload, 4L, false, PayloadChecksums.fromPayload(payload)); boolean value = clientReq.validateResponse(); assertTrue(clientReq.errorMessage(), value); assertFalse(clientReq.hasUpdatedConfig()); @@ -147,7 +155,7 @@ public class JRTConfigRequestV3Test { @Test public void nothing_is_updated() { Payload payload = createPayload(); - serverReq.addOkResponse(payload, currentGeneration, false, configMd5); + serverReq.addOkResponse(payload, currentGeneration, false, payloadChecksums); assertTrue(clientReq.validateResponse()); assertFalse(clientReq.hasUpdatedConfig()); assertFalse(clientReq.hasUpdatedGeneration()); @@ -158,7 +166,7 @@ public class JRTConfigRequestV3Test { Payload payload = Payload.from(ConfigPayload.empty()); clientReq = createReq(payload); serverReq = createReq(clientReq.getRequest()); - serverReq.addOkResponse(payload, currentGeneration, false, ConfigUtils.getMd5(payload.getData())); + serverReq.addOkResponse(payload, currentGeneration, false, PayloadChecksums.fromPayload(payload)); boolean val = clientReq.validateResponse(); assertTrue(clientReq.errorMessage(), val); assertFalse(clientReq.hasUpdatedConfig()); @@ -195,7 +203,7 @@ public class JRTConfigRequestV3Test { @Override public void createResponse() { JRTServerConfigRequest serverRequest = createReq(request); - serverRequest.addOkResponse(createPayload(), currentGeneration, 
false, configMd5); + serverRequest.addOkResponse(createPayload(), currentGeneration, false, payloadChecksums); } }); @@ -206,8 +214,10 @@ public class JRTConfigRequestV3Test { assertTrue(sub.nextConfig(120_0000)); sub.close(); JRTClientConfigRequest nextReq = createReq(sub, Trace.createNew()); - assertThat(nextReq.getRequestConfigMd5(), is(sub.getConfigState().getChecksum().asString())); - assertThat(nextReq.getRequestGeneration(), is(currentGeneration)); + assertEquals(nextReq.getRequestConfigMd5(), sub.getConfigState().getChecksums().getForType(MD5).asString()); + assertEquals(nextReq.getRequestConfigChecksums().getForType(MD5).asString(), sub.getConfigState().getChecksums().getForType(MD5).asString()); + assertEquals(nextReq.getRequestConfigChecksums().getForType(XXHASH64).asString(), sub.getConfigState().getChecksums().getForType(XXHASH64).asString()); + assertEquals(nextReq.getRequestGeneration(), currentGeneration); } @Test @@ -225,12 +235,12 @@ public class JRTConfigRequestV3Test { @Test public void parameters_are_validated() { assertTrue(serverReq.validateParameters()); - assertValidationFail(createReq("35#$#!$@#", defNamespace, hostname, configId, configMd5, currentGeneration, timeout, trace)); - assertValidationFail(createReq(defName, "abcd.o#$*(!&$", hostname, configId, configMd5, currentGeneration, timeout, trace)); - assertValidationFail(createReq(defName, defNamespace, hostname, configId, "34", currentGeneration, timeout, trace)); - assertValidationFail(createReq(defName, defNamespace, hostname, configId, configMd5, -34, timeout, trace)); - assertValidationFail(createReq(defName, defNamespace, hostname, configId, configMd5, currentGeneration, -23, trace)); - assertValidationFail(createReq(defName, defNamespace, "", configId, configMd5, currentGeneration, timeout, trace)); + assertValidationFail(createReq("35#$#!$@#", defNamespace, hostname, configId, payloadChecksums, currentGeneration, timeout, trace)); + assertValidationFail(createReq(defName, 
"abcd.o#$*(!&$", hostname, configId, payloadChecksums, currentGeneration, timeout, trace)); + assertValidationFail(createReq(defName, defNamespace, hostname, configId, PayloadChecksums.from("1234", "opnq"), currentGeneration, timeout, trace)); + assertValidationFail(createReq(defName, defNamespace, hostname, configId, payloadChecksums, -34, timeout, trace)); + assertValidationFail(createReq(defName, defNamespace, hostname, configId, payloadChecksums, currentGeneration, -23, trace)); + assertValidationFail(createReq(defName, defNamespace, "", configId, payloadChecksums, currentGeneration, timeout, trace)); } private void assertValidationFail(JRTClientConfigRequest req) { @@ -248,12 +258,12 @@ public class JRTConfigRequestV3Test { } private JRTClientConfigRequest createReq(String defName, String defNamespace, - String hostname, String configId, String configMd5, + String hostname, String configId, PayloadChecksums payloadChecksums, long currentGeneration, long timeout, Trace trace) { return JRTClientConfigRequestV3.createWithParams(ConfigKey.createFull(defName, configId, defNamespace), DefContent.fromList(List.of(configDefinition)), hostname, - configMd5, + payloadChecksums, currentGeneration, timeout, trace, @@ -276,13 +286,13 @@ public class JRTConfigRequestV3Test { private JRTClientConfigRequest createReq() { trace = Trace.createNew(3, new ManualClock()); trace.trace(1, "hei"); - return createReq(defName, defNamespace, hostname, configId, configMd5, currentGeneration, timeout, trace); + return createReq(defName, defNamespace, hostname, configId, payloadChecksums, currentGeneration, timeout, trace); } private JRTClientConfigRequest createReq(Payload payload) { trace = Trace.createNew(3, new ManualClock()); trace.trace(1, "hei"); - return createReq(defName, defNamespace, hostname, configId, ConfigUtils.getMd5(payload.getData()), currentGeneration, timeout, trace); + return createReq(defName, defNamespace, hostname, configId, PayloadChecksums.fromPayload(payload), 
currentGeneration, timeout, trace); } private void request_is_parsed_base() { @@ -294,7 +304,8 @@ public class JRTConfigRequestV3Test { assertThat(serverReq.getDefContent().asStringArray(), is(configDefinition)); assertFalse(serverReq.noCache()); assertTrue(serverReq.getRequestTrace().toString().contains("hi")); - assertThat(serverReq.getRequestConfigMd5(), is(configMd5)); + assertThat(serverReq.getRequestConfigChecksums().getForType(MD5), is(payloadChecksums.getForType(MD5))); + assertThat(serverReq.getRequestConfigChecksums().getForType(XXHASH64), is(payloadChecksums.getForType(XXHASH64))); assertThat(serverReq.getRequestGeneration(), is(currentGeneration)); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java index 00d010e75c8..7f39d678fdf 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java @@ -44,7 +44,10 @@ public class SuperModelController { public ConfigResponse resolveConfig(GetConfigRequest request) { ConfigKey<?> configKey = request.getConfigKey(); validateConfigDefinition(request.getConfigKey(), request.getDefContent()); - return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true), generation, false); + return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true), + generation, + false, + request.configPayloadChecksums()); } private void validateConfigDefinition(ConfigKey<?> configKey, DefContent defContent) { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java index df1427bdf6d..0b409d38196 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java +++ 
b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java @@ -130,12 +130,11 @@ public class Application implements ModelResult { metricUpdater.incrementFailedRequests(); throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName()); } - log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + ("Resolving " + configKey + " with config definition " + def)); + log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + "Resolving " + configKey + " with config definition " + def); var payload = createPayload(configKey, def); - var response = responseFactory.createResponse(payload.getFirst(), applicationGeneration, payload.getSecond()); - return response; + return responseFactory.createResponse(payload.getFirst(), applicationGeneration, payload.getSecond(), req.configPayloadChecksums()); } private Pair<AbstractUtf8Array, Boolean> createPayload(ConfigKey<?> configKey, ConfigDefinition def) { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java index 414782a43f4..ea0cba511cd 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java @@ -6,7 +6,6 @@ import com.yahoo.component.Version; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.FileRegistry; -import com.yahoo.config.model.api.ApplicationRoles; import com.yahoo.config.model.api.ConfigDefinitionRepo; import com.yahoo.config.model.api.ConfigServerSpec; import com.yahoo.config.model.api.ContainerEndpoint; @@ -23,7 +22,6 @@ import com.yahoo.config.provision.AthenzDomain; import com.yahoo.config.provision.ClusterSpec; import 
com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.HostName; -import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.container.jdisc.secretstore.SecretStore; @@ -41,6 +39,7 @@ import java.security.cert.X509Certificate; import java.util.List; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ExecutorService; import java.util.function.ToIntFunction; import static com.yahoo.vespa.config.server.ConfigServerSpec.fromConfig; @@ -59,6 +58,7 @@ public class ModelContextImpl implements ModelContext { private final DeployLogger deployLogger; private final ConfigDefinitionRepo configDefinitionRepo; private final FileRegistry fileRegistry; + private final ExecutorService executor; private final HostProvisioner hostProvisioner; private final Provisioned provisioned; private final Optional<? extends Reindexing> reindexing; @@ -85,6 +85,7 @@ public class ModelContextImpl implements ModelContext { DeployLogger deployLogger, ConfigDefinitionRepo configDefinitionRepo, FileRegistry fileRegistry, + ExecutorService executor, Optional<? extends Reindexing> reindexing, HostProvisioner hostProvisioner, Provisioned provisioned, @@ -99,6 +100,7 @@ public class ModelContextImpl implements ModelContext { this.deployLogger = deployLogger; this.configDefinitionRepo = configDefinitionRepo; this.fileRegistry = fileRegistry; + this.executor = executor; this.reindexing = reindexing; this.hostProvisioner = hostProvisioner; this.provisioned = provisioned; @@ -138,6 +140,11 @@ public class ModelContextImpl implements ModelContext { public FileRegistry getFileRegistry() { return fileRegistry; } @Override + public ExecutorService getExecutor() { + return executor; + } + + @Override public Optional<? 
extends Reindexing> reindexing() { return reindexing; } @Override diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java index c01008fafa0..8abb701606c 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java @@ -11,6 +11,7 @@ import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.jdisc.application.BindingMatch; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.GetConfigRequest; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.DefContent; import com.yahoo.vespa.config.protocol.VespaVersion; import com.yahoo.vespa.config.server.RequestHandler; @@ -195,4 +196,7 @@ public class HttpConfigRequest implements GetConfigRequest, TenantRequest { @Override public String getRequestDefMd5() { return ConfigUtils.getDefMd5(getDefContent().asList()); } + @Override + public PayloadChecksums configPayloadChecksums() { return PayloadChecksums.empty(); } + } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java index ffb4550caf0..b9728a4dd43 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java @@ -5,7 +5,6 @@ import com.google.common.collect.ImmutableSet; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.component.Version; import com.yahoo.config.application.api.ApplicationPackage; -import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.api.ConfigDefinitionRepo; import com.yahoo.config.model.api.Model; 
import com.yahoo.config.model.api.ModelContext; @@ -29,20 +28,17 @@ import com.yahoo.vespa.config.server.monitoring.Metrics; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.SessionZooKeeperClient; import com.yahoo.vespa.config.server.session.SilentDeployLogger; -import com.yahoo.vespa.config.server.tenant.ApplicationRolesStore; import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache; import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataStore; import com.yahoo.vespa.config.server.tenant.EndpointCertificateRetriever; -import com.yahoo.vespa.config.server.tenant.TenantListener; import com.yahoo.vespa.config.server.tenant.TenantRepository; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.flags.FlagSource; -import java.security.cert.X509Certificate; import java.util.Comparator; -import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.ExecutorService; import java.util.logging.Level; import java.util.logging.Logger; @@ -65,11 +61,13 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> { private final Curator curator; private final FlagSource flagSource; private final SecretStore secretStore; + private final ExecutorService executor; public ActivatedModelsBuilder(TenantName tenant, long applicationGeneration, SessionZooKeeperClient zkClient, Optional<ApplicationSet> currentActiveApplicationSet, + ExecutorService executor, Curator curator, Metrics metrics, PermanentApplicationPackage permanentApplicationPackage, @@ -80,11 +78,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> { Zone zone, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo) { - super(modelFactoryRegistry, - configserverConfig, - zone, - hostProvisionerProvider, - new SilentDeployLogger()); + super(modelFactoryRegistry, configserverConfig, zone, hostProvisionerProvider, new 
SilentDeployLogger()); this.tenant = tenant; this.applicationGeneration = applicationGeneration; this.zkClient = zkClient; @@ -95,6 +89,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> { this.curator = curator; this.flagSource = flagSource; this.secretStore = secretStore; + this.executor = executor; } @Override @@ -116,6 +111,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> { new SilentDeployLogger(), configDefinitionRepo, getForVersionOrLatest(applicationPackage.getFileRegistries(), modelFactory.version()).orElse(new MockFileRegistry()), + executor, new ApplicationCuratorDatabase(tenant, curator).readReindexingStatus(applicationId), createStaticProvisioner(applicationPackage, modelContextProperties.applicationId(), provisioned), provisioned, diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java index e5ef6dd38c7..1a8092d36b3 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java @@ -38,6 +38,7 @@ import java.time.Duration; import java.time.Instant; import java.util.List; import java.util.Optional; +import java.util.concurrent.ExecutorService; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -57,11 +58,13 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P private final Optional<ApplicationSet> currentActiveApplicationSet; private final ModelContext.Properties properties; private final Curator curator; + private final ExecutorService executor; public PreparedModelsBuilder(ModelFactoryRegistry modelFactoryRegistry, PermanentApplicationPackage permanentApplicationPackage, ConfigDefinitionRepo configDefinitionRepo, 
FileRegistry fileRegistry, + ExecutorService executor, HostProvisionerProvider hostProvisionerProvider, Curator curator, HostValidator<ApplicationId> hostValidator, @@ -79,6 +82,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.properties = properties; + this.executor = executor; } @Override @@ -100,6 +104,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P deployLogger(), configDefinitionRepo, fileRegistry, + executor, new ApplicationCuratorDatabase(applicationId.tenant(), curator).readReindexingStatus(applicationId), createHostProvisioner(applicationPackage, provisioned), provisioned, diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java index 7afeebdd3cf..8c1cdeb753a 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java @@ -1,10 +1,16 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server.rpc; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.text.AbstractUtf8Array; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.ConfigResponse; +import com.yahoo.vespa.config.util.ConfigUtils; + +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; /** * Represents a component that creates config responses from a payload. Different implementations @@ -28,12 +34,41 @@ public interface ConfigResponseFactory { /** * Creates a {@link ConfigResponse} for a given payload and generation. * - * @param rawPayload the {@link ConfigPayload} to put in the response - * @param generation the payload generation - * @param applyOnRestart true if this config change should only be applied on restart, - * false if it should be applied immediately + * @param rawPayload the {@link ConfigPayload} to put in the response + * @param generation the payload generation + * @param applyOnRestart true if this config change should only be applied on restart, + * false if it should be applied immediately + * @param requestsPayloadChecksums payload checksums from requests * @return a {@link ConfigResponse} that can be sent to the client */ - ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, boolean applyOnRestart); + ConfigResponse createResponse(AbstractUtf8Array rawPayload, + long generation, + boolean applyOnRestart, + PayloadChecksums requestsPayloadChecksums); + + /** Generates payload checksums based on what type of checksums exist in request */ + default PayloadChecksums generatePayloadChecksums(AbstractUtf8Array rawPayload, PayloadChecksums requestsPayloadChecksums) { + PayloadChecksum requestChecksumMd5 = requestsPayloadChecksums.getForType(MD5); + PayloadChecksum requestChecksumXxhash64 = 
requestsPayloadChecksums.getForType(XXHASH64); + + PayloadChecksum md5 = PayloadChecksum.empty(MD5); + PayloadChecksum xxhash64 = PayloadChecksum.empty(XXHASH64); + // Response contains same checksum type as in request, except when both are empty, + // then use both checksum types in response + if (requestChecksumMd5.isEmpty() && requestChecksumXxhash64.isEmpty() + || ( ! requestChecksumMd5.isEmpty() && ! requestChecksumXxhash64.isEmpty())) { + md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5); + xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64); + } else if ( ! requestChecksumMd5.isEmpty()) { + md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5); + } else if (requestChecksumMd5.isEmpty() && !requestChecksumXxhash64.isEmpty()) { + xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64); + } else { + md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5); + xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64); + } + + return PayloadChecksums.from(md5, xxhash64); + } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java index 820f5c15318..bad03862133 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java @@ -4,8 +4,8 @@ package com.yahoo.vespa.config.server.rpc; import com.yahoo.cloud.config.SentinelConfig; import com.yahoo.collections.Pair; import com.yahoo.component.Version; -import com.yahoo.config.ConfigInstance; import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.net.HostName; import com.yahoo.vespa.config.ConfigPayload; @@ -13,18 +13,19 @@ import com.yahoo.vespa.config.ErrorCode; import 
com.yahoo.vespa.config.UnknownConfigIdException; import com.yahoo.vespa.config.protocol.ConfigResponse; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; -import com.yahoo.vespa.config.protocol.SlimeConfigResponse; +import com.yahoo.vespa.config.protocol.Payload; import com.yahoo.vespa.config.protocol.Trace; import com.yahoo.vespa.config.protocol.VespaVersion; import com.yahoo.vespa.config.server.GetConfigContext; import com.yahoo.vespa.config.server.UnknownConfigDefinitionException; import com.yahoo.vespa.config.server.tenant.TenantRepository; -import com.yahoo.vespa.config.util.ConfigUtils; import java.util.Optional; import java.util.logging.Level; import java.util.logging.Logger; +import static com.yahoo.vespa.config.protocol.SlimeConfigResponse.fromConfigPayload; + /** * @author hmusum */ @@ -126,7 +127,7 @@ class GetConfigProcessor implements Runnable { // config == null is not an error, but indicates that the config will be returned later. if ((config != null) && (!config.hasEqualConfig(request) || config.hasNewerGeneration(request) || forceResponse)) { // debugLog(trace, "config response before encoding:" + config.toString()); - request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.applyOnRestart(), config.getConfigMd5()); + request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), config.applyOnRestart(), config.getPayloadChecksums()); if (logDebug(trace)) { debugLog(trace, "return response: " + request.getShortDescription()); } @@ -166,9 +167,11 @@ class GetConfigProcessor implements Runnable { private void returnEmpty(JRTServerConfigRequest request) { log.log(Level.FINE, () -> "Returning empty sentinel config for request from " + request.getClientHostName()); var emptyPayload = ConfigPayload.fromInstance(new SentinelConfig.Builder().build()); - String configMd5 = ConfigUtils.getMd5(emptyPayload); - ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, 0, false, 
configMd5); - request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), false, config.getConfigMd5()); + ConfigResponse config = fromConfigPayload(emptyPayload, + 0, + false, + PayloadChecksums.fromPayload(Payload.from(emptyPayload))); + request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), false, config.getPayloadChecksums()); respond(request); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java index f309b30cf8d..6a1ecfac7bb 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java @@ -1,14 +1,14 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.rpc; import com.yahoo.text.AbstractUtf8Array; import com.yahoo.text.Utf8Array; import com.yahoo.vespa.config.LZ4PayloadCompressor; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.CompressionInfo; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.ConfigResponse; import com.yahoo.vespa.config.protocol.SlimeConfigResponse; -import com.yahoo.vespa.config.util.ConfigUtils; /** * Compressor that compresses config payloads to lz4. 
@@ -22,11 +22,12 @@ public class LZ4ConfigResponseFactory implements ConfigResponseFactory { @Override public ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, - boolean applyOnRestart) { - String configMd5 = ConfigUtils.getMd5(rawPayload); + boolean applyOnRestart, + PayloadChecksums requestsPayloadChecksums) { CompressionInfo info = CompressionInfo.create(CompressionType.LZ4, rawPayload.getByteLength()); Utf8Array compressed = new Utf8Array(compressor.compress(rawPayload.wrap())); - return new SlimeConfigResponse(compressed, generation, applyOnRestart, configMd5, info); + PayloadChecksums payloadChecksums = generatePayloadChecksums(rawPayload, requestsPayloadChecksums); + return new SlimeConfigResponse(compressed, generation, applyOnRestart, payloadChecksums, info); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java index 889548196aa..ce973e538b7 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java @@ -1,12 +1,12 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.rpc; import com.yahoo.text.AbstractUtf8Array; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.CompressionInfo; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.ConfigResponse; import com.yahoo.vespa.config.protocol.SlimeConfigResponse; -import com.yahoo.vespa.config.util.ConfigUtils; /** * Simply returns an uncompressed payload. 
@@ -18,10 +18,11 @@ public class UncompressedConfigResponseFactory implements ConfigResponseFactory @Override public ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, - boolean applyOnRestart) { - String configMd5 = ConfigUtils.getMd5(rawPayload); + boolean applyOnRestart, + PayloadChecksums requestsPayloadChecksums) { CompressionInfo info = CompressionInfo.create(CompressionType.UNCOMPRESSED, rawPayload.getByteLength()); - return new SlimeConfigResponse(rawPayload, generation, applyOnRestart, configMd5, info); + PayloadChecksums payloadChecksums = generatePayloadChecksums(rawPayload, requestsPayloadChecksums); + return new SlimeConfigResponse(rawPayload, generation, applyOnRestart, payloadChecksums, info); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java index 4c148e6e580..e58f5985288 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java @@ -9,7 +9,6 @@ import com.yahoo.config.FileReference; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.FileRegistry; -import com.yahoo.config.model.api.ApplicationRoles; import com.yahoo.config.model.api.ConfigDefinitionRepo; import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.EndpointCertificateMetadata; @@ -39,7 +38,6 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException; import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry; import com.yahoo.vespa.config.server.modelfactory.PreparedModelsBuilder; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; -import com.yahoo.vespa.config.server.tenant.ApplicationRolesStore; import 
com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache; import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataStore; import com.yahoo.vespa.config.server.tenant.EndpointCertificateRetriever; @@ -56,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ExecutorService; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -79,9 +78,11 @@ public class SessionPreparer { private final Zone zone; private final SecretStore secretStore; private final FlagSource flagSource; + private final ExecutorService executor; public SessionPreparer(ModelFactoryRegistry modelFactoryRegistry, FileDistributionFactory fileDistributionFactory, + ExecutorService executor, HostProvisionerProvider hostProvisionerProvider, PermanentApplicationPackage permanentApplicationPackage, ConfigserverConfig configserverConfig, @@ -100,8 +101,11 @@ public class SessionPreparer { this.zone = zone; this.secretStore = secretStore; this.flagSource = flagSource; + this.executor = executor; } + ExecutorService getExecutor() { return executor; } + /** * Prepares a session (validates, builds model, writes to zookeeper and distributes files) * @@ -202,6 +206,7 @@ public class SessionPreparer { permanentApplicationPackage, configDefinitionRepo, fileRegistry, + executor, hostProvisionerProvider, curator, hostValidator, diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java index ff9820ffb0c..43a684c1fba 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java @@ -503,6 +503,7 @@ public class SessionRepository { session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, + 
sessionPreparer.getExecutor(), curator, metrics, permanentApplicationPackage, diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java index ef219a21221..4d1a421e748 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java @@ -6,6 +6,7 @@ import com.google.inject.Inject; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.cloud.config.ZookeeperServerConfig; import com.yahoo.concurrent.DaemonThreadFactory; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.concurrent.Lock; import com.yahoo.concurrent.Locks; import com.yahoo.concurrent.StripedExecutor; @@ -35,6 +36,7 @@ import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.curator.transaction.CuratorOperations; import com.yahoo.vespa.curator.transaction.CuratorTransaction; import com.yahoo.vespa.flags.FlagSource; +import com.yahoo.vespa.flags.Flags; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; import org.apache.curator.framework.state.ConnectionState; @@ -105,6 +107,7 @@ public class TenantRepository { private final StripedExecutor<TenantName> zkSessionWatcherExecutor; private final StripedExecutor<TenantName> zkApplicationWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; + private final ExecutorService deployHelperExecutor; private final FlagSource flagSource; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; @@ -198,6 +201,8 @@ public class TenantRepository { this.reloadListener = reloadListener; this.tenantListener = tenantListener; this.zookeeperServerConfig = zookeeperServerConfig; + // This we should control with a feature flag. 
+ this.deployHelperExecutor = createModelBuilderExecutor(Flags.NUM_DEPLOY_HELPER_THREADS.bindTo(flagSource).value()); curator.framework().getConnectionStateListenable().addListener(this::stateChanged); @@ -215,6 +220,14 @@ public class TenantRepository { TimeUnit.SECONDS); } + private ExecutorService createModelBuilderExecutor(int numThreads) { + if (numThreads == 0) return new InThreadExecutorService(); + if (numThreads < 0) { + numThreads = Runtime.getRuntime().availableProcessors(); + } + return Executors.newFixedThreadPool(numThreads, ThreadFactoryFactory.getDaemonThreadFactory("deploy-helper")); + } + private void notifyTenantsLoaded() { tenantListener.onTenantsLoaded(); } @@ -335,6 +348,7 @@ public class TenantRepository { PermanentApplicationPackage permanentApplicationPackage = new PermanentApplicationPackage(configserverConfig); SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory, + deployHelperExecutor, hostProvisionerProvider, permanentApplicationPackage, configserverConfig, diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java index 41af0296534..28d50a5396e 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java @@ -26,6 +26,7 @@ import com.yahoo.text.Utf8; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.GetConfigRequest; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.ConfigResponse; import com.yahoo.vespa.config.protocol.DefContent; import com.yahoo.vespa.config.protocol.VespaVersion; @@ -810,6 +811,9 @@ public class ApplicationRepositoryTest { @Override public String getRequestDefMd5() { return ""; } + @Override + public 
PayloadChecksums configPayloadChecksums() { return PayloadChecksums.empty(); } + }, Optional.empty()); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java index d97f809da6e..9a8b89e55c0 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.config.server; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.component.Version; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.HostProvisioner; @@ -58,6 +59,7 @@ public class ModelContextImplTest { new BaseDeployLogger(), new StaticConfigDefinitionRepo(), new MockFileRegistry(), + new InThreadExecutorService(), Optional.empty(), hostProvisioner, new Provisioned(), diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java index eb1e541a540..5016107c411 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java @@ -1,4 +1,4 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server; import com.yahoo.cloud.config.LbServicesConfig; @@ -13,11 +13,11 @@ import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.InstanceName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.DefContent; -import com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3; import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; import com.yahoo.vespa.config.protocol.Trace; import com.yahoo.vespa.config.server.model.SuperModelConfigProvider; @@ -36,6 +36,7 @@ import java.util.Map; import java.util.Optional; import static com.yahoo.config.model.api.container.ContainerServiceType.QRSERVER; +import static com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3.createWithParams; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -73,11 +74,12 @@ public class SuperModelControllerTest { @Test(expected = UnknownConfigDefinitionException.class) public void test_unknown_config_definition() { - String md5 = "asdfasf"; - Request request = JRTClientConfigRequestV3.createWithParams(new ConfigKey<>("foo", "id", "bar", null), DefContent.fromList(Collections.emptyList()), - "fromHost", md5, 1, 1, Trace.createDummy(), CompressionType.UNCOMPRESSED, - Optional.empty()) - .getRequest(); + PayloadChecksums payloadChecksums = PayloadChecksums.empty(); + Request request = createWithParams(new ConfigKey<>("foo", "id", "bar", null), + DefContent.fromList(Collections.emptyList()), "fromHost", + payloadChecksums, 1, 1, Trace.createDummy(), + CompressionType.UNCOMPRESSED, Optional.empty()) + .getRequest(); JRTServerConfigRequestV3 v3Request = JRTServerConfigRequestV3.createFromRequest(request); 
handler.resolveConfig(v3Request); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationTest.java index 44491667760..a7fc69d56df 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationTest.java @@ -11,6 +11,7 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.InstanceName; import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.text.Utf8; import com.yahoo.vespa.config.ConfigDefinitionKey; @@ -148,10 +149,13 @@ public class ApplicationTest { } private static GetConfigRequest createRequest(String name, String namespace, String[] schema) { - Request request = JRTClientConfigRequestV3. 
- createWithParams(new ConfigKey<>(name, "admin/model", namespace, null), DefContent.fromArray(schema), - "fromHost", "", 0, 100, Trace.createDummy(), CompressionType.UNCOMPRESSED, - Optional.empty()).getRequest(); + Request request = + JRTClientConfigRequestV3.createWithParams(new ConfigKey<>(name, "admin/model", namespace, null), + DefContent.fromArray(schema), "fromHost", + PayloadChecksums.empty(), 0, 100, + Trace.createDummy(), CompressionType.UNCOMPRESSED, + Optional.empty()) + .getRequest(); return JRTServerConfigRequestV3.createFromRequest(request); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java index 747a0ad3241..b164c3e5cd5 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java @@ -1,11 +1,16 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server.rpc; import com.yahoo.vespa.config.ConfigPayload; +import com.yahoo.vespa.config.PayloadChecksum; +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.ConfigResponse; +import com.yahoo.vespa.config.protocol.Payload; import org.junit.Test; +import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5; +import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64; import static org.junit.Assert.assertEquals; /** @@ -13,22 +18,63 @@ import static org.junit.Assert.assertEquals; */ public class ConfigResponseFactoryTest { + private static final ConfigPayload payload = ConfigPayload.fromString("{ \"field1\": 11, \"field2\": 11 }"); + + private static final PayloadChecksums payloadChecksums = PayloadChecksums.fromPayload(Payload.from(payload)); + private static final PayloadChecksums payloadChecksumsEmpty = PayloadChecksums.empty(); + private static final PayloadChecksums payloadChecksumsOnlyMd5 = + PayloadChecksums.from(PayloadChecksum.fromPayload(Payload.from(payload), MD5)); + private static final PayloadChecksums payloadChecksumsOnlyXxhash64 = + PayloadChecksums.from(PayloadChecksum.fromPayload(Payload.from(payload), XXHASH64)); + @Test public void testUncompressedFactory() { UncompressedConfigResponseFactory responseFactory = new UncompressedConfigResponseFactory(); - ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false); + ConfigResponse response = responseFactory.createResponse(payload.toUtf8Array(true), 3, false, payloadChecksums); assertEquals(CompressionType.UNCOMPRESSED, response.getCompressionInfo().getCompressionType()); assertEquals(3L,response.getGeneration()); - assertEquals(2, response.getPayload().getByteLength()); + assertEquals(25, response.getPayload().getByteLength()); + assertEquals(payloadChecksums, response.getPayloadChecksums()); } @Test public void 
testLZ4CompressedFactory() { + // Both checksums in request + { + ConfigResponse response = createResponse(payloadChecksums); + assertEquals(payloadChecksums, response.getPayloadChecksums()); + } + + // No checksums in request (empty checksums), both checksums should be in response + { + ConfigResponse response = createResponse(payloadChecksumsEmpty); + assertEquals(payloadChecksums.getForType(MD5), response.getPayloadChecksums().getForType(MD5)); + assertEquals(payloadChecksums.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64)); + } + + // Only md5 checksums in request + { + ConfigResponse response = createResponse(payloadChecksumsOnlyMd5); + assertEquals(payloadChecksumsOnlyMd5.getForType(MD5), response.getPayloadChecksums().getForType(MD5)); + assertEquals(payloadChecksumsOnlyMd5.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64)); + } + + // Only xxhash64 checksums in request + { + ConfigResponse response = createResponse(payloadChecksumsOnlyXxhash64); + assertEquals(payloadChecksumsOnlyXxhash64.getForType(MD5), response.getPayloadChecksums().getForType(MD5)); + assertEquals(payloadChecksumsOnlyXxhash64.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64)); + } + } + + private ConfigResponse createResponse(PayloadChecksums payloadChecksums) { LZ4ConfigResponseFactory responseFactory = new LZ4ConfigResponseFactory(); - ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false); + ConfigResponse response = responseFactory.createResponse(payload.toUtf8Array(true), 3, false, payloadChecksums); assertEquals(CompressionType.LZ4, response.getCompressionInfo().getCompressionType()); assertEquals(3L, response.getGeneration()); - assertEquals(3, response.getPayload().getByteLength()); + assertEquals(23, response.getPayload().getByteLength()); + + return response; } } diff --git 
a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/DelayedConfigResponseTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/DelayedConfigResponseTest.java index 738e8c9827d..21e1c1f3448 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/DelayedConfigResponseTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/DelayedConfigResponseTest.java @@ -2,12 +2,11 @@ package com.yahoo.vespa.config.server.rpc; import com.yahoo.config.provision.ApplicationId; - +import com.yahoo.vespa.config.PayloadChecksums; import com.yahoo.jrt.Request; import com.yahoo.vespa.config.ConfigKey; import com.yahoo.vespa.config.protocol.CompressionType; import com.yahoo.vespa.config.protocol.DefContent; -import com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; import com.yahoo.vespa.config.protocol.Trace; @@ -22,6 +21,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import static com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3.createWithParams; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; @@ -41,7 +41,7 @@ public class DelayedConfigResponseTest { MockRpcServer rpc = new MockRpcServer(13337, temporaryFolder.newFolder()); DelayedConfigResponses responses = new DelayedConfigResponses(rpc, 1, false); assertThat(responses.size(), is(0)); - JRTServerConfigRequest req = createRequest("foo", "myid", "mymd5", 3, 1000000, "bar"); + JRTServerConfigRequest req = createRequest("foo", "myid", 3, 1000000, "bar"); req.setDelayedResponse(true); GetConfigContext context = GetConfigContext.testContext(ApplicationId.defaultId()); responses.delayResponse(req, context); @@ -49,7 +49,7 @@ public class DelayedConfigResponseTest { req.setDelayedResponse(false); 
responses.delayResponse(req, context); - responses.delayResponse(createRequest("foolio", "myid", "mymd5", 3, 100000, "bar"), context); + responses.delayResponse(createRequest("foolio", "myid", 3, 100000, "bar"), context); assertThat(responses.size(), is(2)); assertTrue(req.isDelayedResponse()); List<DelayedConfigResponses.DelayedConfigResponse> it = responses.allDelayedResponses(); @@ -61,7 +61,7 @@ public class DelayedConfigResponseTest { GetConfigContext context = GetConfigContext.testContext(ApplicationId.defaultId()); MockRpcServer rpc = new MockRpcServer(13337, temporaryFolder.newFolder()); DelayedConfigResponses responses = new DelayedConfigResponses(rpc, 1, false); - responses.delayResponse(createRequest("foolio", "myid", "mymd5", 3, 100000, "bar"), context); + responses.delayResponse(createRequest("foolio", "myid", 3, 100000, "bar"), context); assertThat(responses.size(), is(1)); responses.allDelayedResponses().get(0).cancelAndRemove(); assertThat(responses.size(), is(0)); @@ -73,17 +73,18 @@ public class DelayedConfigResponseTest { DelayedConfigResponses responses = new DelayedConfigResponses(rpc, 1, false); assertThat(responses.size(), is(0)); assertThat(responses.toString(), is("DelayedConfigResponses. Average Size=0")); - JRTServerConfigRequest req = createRequest("foo", "myid", "mymd5", 3, 100, "bar"); + JRTServerConfigRequest req = createRequest("foo", "myid", 3, 100, "bar"); responses.delayResponse(req, GetConfigContext.testContext(ApplicationId.defaultId())); rpc.waitUntilSet(Duration.ofSeconds(5)); assertThat(rpc.latestRequest, is(req)); } - private JRTServerConfigRequest createRequest(String configName, String configId, String md5, long generation, long timeout, String namespace) { - Request request = JRTClientConfigRequestV3. 
- createWithParams(new ConfigKey<>(configName, configId, namespace, null), DefContent.fromList(Collections.emptyList()), - "fromHost", md5, generation, timeout, Trace.createDummy(), CompressionType.UNCOMPRESSED, - Optional.empty()).getRequest(); + private JRTServerConfigRequest createRequest(String configName, String configId, long generation, long timeout, String namespace) { + Request request = createWithParams(new ConfigKey<>(configName, configId, namespace, null), + DefContent.fromList(Collections.emptyList()), "fromHost", + PayloadChecksums.empty(), generation, timeout, Trace.createDummy(), + CompressionType.UNCOMPRESSED, Optional.empty()) + .getRequest(); return JRTServerConfigRequestV3.createFromRequest(request); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java index 3fedddc93a7..4ea6fa21a50 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.config.server.session; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.component.Version; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.FileRegistry; import com.yahoo.config.model.api.ContainerEndpoint; @@ -123,6 +124,7 @@ public class SessionPreparerTest { return new SessionPreparer( modelFactoryRegistry, new MockFileDistributionFactory(configserverConfig), + new InThreadExecutorService(), hostProvisionerProvider, new PermanentApplicationPackage(configserverConfig), configserverConfig, diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java index 
1e1314311a6..83764686e5a 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java @@ -1,11 +1,11 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.session; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.provision.AllocatedHosts; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.path.Path; import com.yahoo.vespa.config.server.application.ApplicationSet; import com.yahoo.vespa.config.server.host.HostValidator; import com.yahoo.vespa.curator.mock.MockCurator; @@ -24,7 +24,7 @@ public class SessionTest { public static class MockSessionPreparer extends SessionPreparer { public MockSessionPreparer() { - super(null, null, null, null, null, null, new MockCurator(), null, null, null); + super(null, null, new InThreadExecutorService(), null, null, null, null, new MockCurator(), null, null, null); } @Override diff --git a/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java b/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java new file mode 100644 index 00000000000..1299a2c6eb4 --- /dev/null +++ b/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java @@ -0,0 +1,26 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.restapi; + +import com.yahoo.container.jdisc.HttpResponse; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * @author freva + */ +public class ByteArrayResponse extends HttpResponse { + + private final byte[] data; + + public ByteArrayResponse(byte[] data) { + super(200); + this.data = data; + } + + @Override + public void render(OutputStream stream) throws IOException { + stream.write(data); + } + +} diff --git a/container-core/src/main/java/com/yahoo/restapi/StringResponse.java b/container-core/src/main/java/com/yahoo/restapi/StringResponse.java index 55ea22880de..003b58de827 100644 --- a/container-core/src/main/java/com/yahoo/restapi/StringResponse.java +++ b/container-core/src/main/java/com/yahoo/restapi/StringResponse.java @@ -1,27 +1,13 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.restapi; -import com.yahoo.container.jdisc.HttpResponse; - -import java.io.IOException; -import java.io.OutputStream; import java.nio.charset.StandardCharsets; /** * @author bratseth */ -public class StringResponse extends HttpResponse { - - private final String message; - +public class StringResponse extends ByteArrayResponse { public StringResponse(String message) { - super(200); - this.message = message; + super(message.getBytes(StandardCharsets.UTF_8)); } - - @Override - public void render(OutputStream stream) throws IOException { - stream.write(message.getBytes(StandardCharsets.UTF_8)); - } - } diff --git a/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/NetworkMultiplexerHolder.java b/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/NetworkMultiplexerHolder.java index 3c465835589..89ecc931efb 100644 --- a/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/NetworkMultiplexerHolder.java +++ 
b/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/NetworkMultiplexerHolder.java @@ -38,8 +38,10 @@ public class NetworkMultiplexerHolder extends AbstractComponent { @Override public void deconstruct() { synchronized (monitor) { - net.destroy(); - net = null; + if (net != null) { + net.destroy(); + net = null; + } destroyed = true; } } diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java index 49bdba2c90f..793d394801f 100644 --- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java +++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java @@ -112,6 +112,7 @@ public class AllParser extends SimpleParser { protected Item negativeItem() { int position = tokens.getPosition(); Item item = null; + boolean isComposited = false; try { if ( ! tokens.skip(MINUS)) return null; if (tokens.currentIsNoIgnore(SPACE)) return null; @@ -121,6 +122,7 @@ public class AllParser extends SimpleParser { item = compositeItem(); if (item != null) { + isComposited = true; if (item instanceof OrItem) { // Turn into And AndItem and = new AndItem(); @@ -137,9 +139,11 @@ public class AllParser extends SimpleParser { // Heuristic overdrive engaged! // Interpret -N as a positive item matching a negative number (by backtracking out of this) // but not if there is an explicit index (such as -a:b) + // but interpret -(N) as a negative item matching a positive number // but interpret --N as a negative item matching a negative number if (item instanceof IntItem && ((IntItem)item).getIndexName().isEmpty() && + ! isComposited && ! 
((IntItem)item).getNumber().startsWith(("-"))) item = null; diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java index cef8ae1751c..8ca711297d3 100644 --- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java +++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java @@ -1970,6 +1970,13 @@ public class ParseTestCase { } @Test + public void testNegativeTermPositiveNumberInParentheses() { + tester.assertParsed("+a -12", "a -(12)", Query.Type.ALL); + tester.assertParsed("+a -(AND 12 15)", "a -(12 15)", Query.Type.ALL); + tester.assertParsed("+a -12 -15", "a -(12) -(15)", Query.Type.ALL); + } + + @Test public void testSingleNegativeNumberLikeTerm() { tester.assertParsed("-12", "-12", Query.Type.ALL); } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java index 0be32165916..3391965dc67 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java @@ -11,6 +11,7 @@ import com.yahoo.vespa.athenz.client.zms.ZmsClient; import java.time.Instant; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; public class AthenzAccessControlService implements AccessControlService { @@ -34,8 +35,8 @@ public class AthenzAccessControlService implements AccessControlService { if(!isVespaTeamMember(user)) { throw new IllegalArgumentException(String.format("User %s requires manual approval, please contact Vespa team", user.getName())); } - 
List<AthenzUser> users = zmsClient.listPendingRoleApprovals(dataPlaneAccessRole); - if (users.contains(user)) { + Map<AthenzUser, String> users = zmsClient.listPendingRoleApprovals(dataPlaneAccessRole); + if (users.containsKey(user)) { zmsClient.approvePendingRoleMembership(dataPlaneAccessRole, user, expiry); return true; } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzDbMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzDbMock.java index 02a6efb280b..899e3174df9 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzDbMock.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzDbMock.java @@ -42,6 +42,7 @@ public class AthenzDbMock { public final Set<AthenzIdentity> tenantAdmins = new HashSet<>(); public final Map<ApplicationId, Application> applications = new HashMap<>(); public final Map<String, Service> services = new HashMap<>(); + public final List<Role> roles = new ArrayList<>(); public final List<Policy> policies = new ArrayList<>(); public boolean isVespaTenant = false; @@ -51,7 +52,7 @@ public class AthenzDbMock { public Domain admin(AthenzIdentity identity) { admins.add(identity); - policies.add(new Policy(identity.getFullName(), ".*", ".*")); + policies.add(new Policy("admin", identity.getFullName(), ".*", ".*")); return this; } @@ -66,7 +67,7 @@ public class AthenzDbMock { } public Domain withPolicy(String principalRegex, String operation, String resource) { - policies.add(new Policy(principalRegex, operation, resource)); + policies.add(new Policy("admin", principalRegex, operation, resource)); return this; } @@ -105,16 +106,22 @@ public class AthenzDbMock { } public static class Policy { + private final String name; private final Pattern principal; private final Pattern action; private final Pattern resource; - public Policy(String principal, 
String action, String resource) { + public Policy(String name, String principal, String action, String resource) { + this.name = name; this.principal = Pattern.compile(principal); this.action = Pattern.compile(action); this.resource = Pattern.compile(resource); } + public String name() { + return name; + } + public boolean principalMatches(AthenzIdentity athenzIdentity) { return this.principal.matcher(athenzIdentity.getFullName()).matches(); } @@ -127,4 +134,16 @@ public class AthenzDbMock { return this.resource.matcher(resource).matches(); } } + + public static class Role { + private final String name; + + public Role(String name) { + this.name = name; + } + + public String name() { + return name; + } + } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java index d067b7a5054..77a49c6cbff 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java @@ -18,12 +18,14 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId; import java.time.Instant; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; /** * @author bjorncs @@ -145,8 +147,18 @@ public class ZmsClientMock implements ZmsClient { } @Override - public void addPolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, AthenzResourceName resourceName, AthenzRole athenzRole) { + public void createPolicy(AthenzDomain athenzDomain, String athenzPolicy) { + List<AthenzDbMock.Policy> policies = 
athenz.getOrCreateDomain(athenzDomain).policies; + if (policies.stream().anyMatch(p -> p.name().equals(athenzPolicy))) { + throw new IllegalArgumentException("Policy already exists"); + } + // Policy will be created in the mock when an assertion is added + } + + @Override + public void addPolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, AthenzResourceName resourceName, AthenzRole athenzRole) { + athenz.getOrCreateDomain(athenzDomain).policies.add(new AthenzDbMock.Policy(athenzPolicy, athenzRole.roleName(), action, resourceName.toResourceNameString())); } @Override @@ -155,8 +167,8 @@ public class ZmsClientMock implements ZmsClient { } @Override - public List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole) { - return List.of(); + public Map<AthenzUser,String> listPendingRoleApprovals(AthenzRole athenzRole) { + return Map.of(); } @Override @@ -170,15 +182,42 @@ public class ZmsClientMock implements ZmsClient { @Override public List<AthenzService> listServices(AthenzDomain athenzDomain) { - return List.of(); + return athenz.getOrCreateDomain(athenzDomain).services.keySet().stream() + .map(serviceName -> new AthenzService(athenzDomain, serviceName)) + .collect(Collectors.toList()); } @Override public void createOrUpdateService(AthenzService athenzService) { + athenz.getOrCreateDomain(athenzService.getDomain()).services.put(athenzService.getName(), new AthenzDbMock.Service(false)); } @Override public void deleteService(AthenzService athenzService) { + athenz.getOrCreateDomain(athenzService.getDomain()).services.remove(athenzService.getName()); + } + + @Override + public void createRole(AthenzRole role, Map<String, Object> properties) { + List<AthenzDbMock.Role> roles = athenz.getOrCreateDomain(role.domain()).roles; + if (roles.stream().anyMatch(r -> r.name().equals(role.roleName()))) { + throw new IllegalArgumentException("Role already exists"); + } + roles.add(new AthenzDbMock.Role(role.roleName())); + } + + @Override + public 
Set<AthenzRole> listRoles(AthenzDomain domain) { + return athenz.getOrCreateDomain(domain).roles.stream() + .map(role -> new AthenzRole(domain, role.name())) + .collect(Collectors.toSet()); + } + + @Override + public Set<String> listPolicies(AthenzDomain domain) { + return athenz.getOrCreateDomain(domain).policies.stream() + .map(AthenzDbMock.Policy::name) + .collect(Collectors.toSet()); } @Override @@ -218,7 +257,7 @@ public class ZmsClientMock implements ZmsClient { } private static void log(String format, Object... args) { - log.log(Level.INFO, String.format(format, args)); + log.log(Level.FINE, String.format(format, args)); } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java index dd9f8c38802..71f1821ff9a 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java @@ -1,10 +1,8 @@ // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.api.integration.deployment; -import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.TenantName; -import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import java.time.Instant; @@ -21,13 +19,19 @@ import java.util.Optional; public interface ApplicationStore { /** Returns the tenant application package of the given version. 
*/ - byte[] get(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion); + byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion); + + /** Returns the application package diff, compared to the previous build, for the given tenant, application and build number */ + Optional<byte[]> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber); + + /** Removes diffs for packages before the given build number */ + void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber); /** Find application package by given build number */ Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber); - /** Stores the given tenant application package of the given version. */ - void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage); + /** Stores the given tenant application package of the given version and diff since previous version. */ + void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff); /** Removes applications older than the given version, for the given application, and returns whether something was removed. */ boolean prune(TenantName tenant, ApplicationName application, ApplicationVersion olderThanVersion); @@ -47,11 +51,14 @@ public interface ApplicationStore { /** Removes all tester packages for the given tester. */ void removeAllTesters(TenantName tenant, ApplicationName application); - /** Stores the given application package as the development package for the given application and zone. 
*/ - void putDev(ApplicationId application, ZoneId zone, byte[] applicationPackage); + /** Returns the application package diff, compared to the previous build, for the given deployment and build number */ + Optional<byte[]> getDevDiff(DeploymentId deploymentId, long buildNumber); + + /** Removes diffs for dev packages before the given build number */ + void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber); - /** Returns the development package for the given application and zone. */ - byte[] getDev(ApplicationId application, ZoneId zone); + /** Stores the given application package as the development package for the given deployment and version and diff since previous version. */ + void putDev(DeploymentId deploymentId, ApplicationVersion version, byte[] applicationPackage, byte[] diff); /** Stores the given application meta data with the current time as part of the path. */ void putMeta(TenantName tenant, ApplicationName application, Instant now, byte[] metaZip); diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java index 30fd8fad1bd..f83809e84c2 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java @@ -23,7 +23,7 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { */ public static final ApplicationVersion unknown = new ApplicationVersion(Optional.empty(), OptionalLong.empty(), Optional.empty(), Optional.empty(), Optional.empty(), - Optional.empty(), Optional.empty()); + Optional.empty(), Optional.empty(), true); // This never changes and is only used to create a valid semantic version number, as required by application bundles private static final 
String majorVersion = "1.0"; @@ -35,11 +35,12 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { private final Optional<Instant> buildTime; private final Optional<String> sourceUrl; private final Optional<String> commit; + private final boolean deployedDirectly; /** Public for serialisation only. */ public ApplicationVersion(Optional<SourceRevision> source, OptionalLong buildNumber, Optional<String> authorEmail, - Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, - Optional<String> commit) { + Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, + Optional<String> commit, boolean deployedDirectly) { if (buildNumber.isEmpty() && ( source.isPresent() || authorEmail.isPresent() || compileVersion.isPresent() || buildTime.isPresent() || sourceUrl.isPresent() || commit.isPresent())) throw new IllegalArgumentException("Build number must be present if any other attribute is"); @@ -63,45 +64,37 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { this.buildTime = buildTime; this.sourceUrl = Objects.requireNonNull(sourceUrl, "sourceUrl cannot be null"); this.commit = Objects.requireNonNull(commit, "commit cannot be null"); + this.deployedDirectly = deployedDirectly; } /** Create an application package version from a completed build, without an author email */ public static ApplicationVersion from(SourceRevision source, long buildNumber) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.empty(), - Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); - } - - /** Creates an version from a completed build and an author email. 
*/ - public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail) { - return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail), - Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); + Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false); } /** Creates an version from a completed build, an author email, and build meta data. */ public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail, Version compileVersion, Instant buildTime) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail), - Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty()); + Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty(), false); } /** Creates an version from a completed build, an author email, and build meta data. */ public static ApplicationVersion from(Optional<SourceRevision> source, long buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, - Optional<String> sourceUrl, Optional<String> commit) { - return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit); + Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly) { + return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly); } /** Returns an unique identifier for this version or "unknown" if version is not known */ public String id() { - if (isUnknown()) { - return "unknown"; - } - return String.format("%s.%d-%s", - majorVersion, - buildNumber.getAsLong(), - source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit) - .or(this::commit) - .orElse("unknown")); + if (isUnknown()) return "unknown"; + + return 
source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit) + .or(this::commit) + .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber.getAsLong(), commit)) + .orElseGet(() -> majorVersion + "." + buildNumber.getAsLong()); } /** @@ -142,18 +135,24 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { return this.equals(unknown); } + /** Returns whether the application package for this version was deployed directly to zone */ + public boolean isDeployedDirectly() { + return deployedDirectly; + } + @Override public boolean equals(Object o) { if (this == o) return true; if ( ! (o instanceof ApplicationVersion)) return false; ApplicationVersion that = (ApplicationVersion) o; return Objects.equals(buildNumber, that.buildNumber) - && Objects.equals(commit(), that.commit()); + && Objects.equals(commit(), that.commit()) + && deployedDirectly == that.deployedDirectly; } @Override public int hashCode() { - return Objects.hash(buildNumber, commit()); + return Objects.hash(buildNumber, commit(), deployedDirectly); } @Override @@ -175,6 +174,9 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> { if (buildNumber().isEmpty() || o.buildNumber().isEmpty()) return Boolean.compare(buildNumber().isPresent(), o.buildNumber.isPresent()); // Unknown version sorts first + if (deployedDirectly || o.deployedDirectly) + return Boolean.compare(deployedDirectly, o.deployedDirectly); // Directly deployed versions sort first + return Long.compare(buildNumber().getAsLong(), o.buildNumber().getAsLong()); } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java index 135429be8f9..1306f4846c2 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java @@ 
-103,6 +103,7 @@ enum PathGroup { applicationInfo(Matcher.tenant, Matcher.application, "/application/v4/tenant/{tenant}/application/{application}/package", + "/application/v4/tenant/{tenant}/application/{application}/diff/{number}", "/application/v4/tenant/{tenant}/application/{application}/compile-version", "/application/v4/tenant/{tenant}/application/{application}/deployment", "/application/v4/tenant/{tenant}/application/{application}/deploying/{*}", diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java index 6557247e21a..fe5fc90df60 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java @@ -27,7 +27,6 @@ import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.identifiers.InstanceId; import com.yahoo.vespa.hosted.controller.api.identifiers.RevisionId; -import com.yahoo.vespa.hosted.controller.api.integration.aws.TenantRoles; import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController; import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota; import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata; @@ -45,8 +44,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter; import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore; import com.yahoo.vespa.hosted.controller.application.ActivateResult; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; -import 
com.yahoo.vespa.hosted.controller.application.ApplicationPackageValidator; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageValidator; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.application.DeploymentQuotaCalculator; @@ -332,11 +331,6 @@ public class ApplicationController { }); } - /** Fetches the requested application package from the artifact store(s). */ - public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) { - return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version)); - } - /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) @@ -372,7 +366,7 @@ public class ApplicationController { Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); - ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision); + ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(new DeploymentId(job.application(), zone), revision)); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); @@ -828,11 +822,6 @@ public class ApplicationController { return DeploymentQuotaCalculator.calculateQuotaUsage(application); } - private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) { - return new ApplicationPackage(revision.isUnknown() ? 
applicationStore.getDev(application, zone) - : applicationStore.get(application.tenant(), application.application(), revision)); - } - /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java index 33eafecf60a..ff266e18bb6 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java @@ -36,7 +36,7 @@ public final class Change { private Change(Optional<Version> platform, Optional<ApplicationVersion> application, boolean pinned) { this.platform = requireNonNull(platform, "platform cannot be null"); this.application = requireNonNull(application, "application cannot be null"); - if (application.isPresent() && application.get().isUnknown()) { + if (application.isPresent() && (application.get().isUnknown() || application.get().isDeployedDirectly())) { throw new IllegalArgumentException("Application version to deploy must be a known version"); } this.pinned = pinned; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java index c29bc3f3f5e..3fcf9fc41f2 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackage.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java @@ -1,5 +1,5 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.controller.application; +package com.yahoo.vespa.hosted.controller.application.pkg; import com.google.common.hash.Hashing; import com.yahoo.component.Version; @@ -251,10 +251,11 @@ public class ApplicationPackage { private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = new ZipStreamReader(new ByteArrayInputStream(zip), name -> names.contains(withoutLegacyDir(name)), - maxSize) + maxSize, + true) .entries().stream() .collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(), - entry -> Optional.of(entry.content()))); + ZipStreamReader.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java new file mode 100644 index 00000000000..97810b9de80 --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java @@ -0,0 +1,112 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.application.pkg; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.yahoo.vespa.hosted.controller.application.pkg.ZipStreamReader.ZipEntryWithContent; + +/** + * @author freva + */ +public class ApplicationPackageDiff { + + public static byte[] diffAgainstEmpty(ApplicationPackage right) { + byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + return diff(new ApplicationPackage(emptyZip), right); + } + + public static byte[] diff(ApplicationPackage left, ApplicationPackage right) { + return diff(left, right, 10 << 20, 1 << 20, 10 << 20); + } + + static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) { + if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8); + + Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff); + Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff); + + StringBuilder sb = new StringBuilder(); + List<String> files = Stream.of(leftContents, rightContents) + .flatMap(contents -> contents.keySet().stream()) + .sorted() + .distinct() + .collect(Collectors.toList()); + for (String file : files) { + if (sb.length() > maxTotalDiffSize) + sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n"); + else + diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), 
maxDiffSizePerFile) + .ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n')); + } + + return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8); + } + + private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) { + Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content); + Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content); + if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get())) + return Optional.empty(); + + if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty())) + return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n", + left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted"))); + + if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c))) + return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n", + left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted"))); + + return LinesComparator.diff( + leftContent.map(c -> lines(c)).orElseGet(List::of), + rightContent.map(c -> lines(c)).orElseGet(List::of)) + .map(diff -> diff.length() > maxDiffSizePerFile ? 
"Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff); + } + + private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) { + return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream() + .collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e)); + } + + private static List<String> lines(byte[] data) { + List<String> lines = new ArrayList<>(Math.min(16, data.length / 100)); + try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) { + String line; + while ((line = bufferedReader.readLine()) != null) { + lines.add(line); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + return lines; + } + + private static boolean isBinary(byte[] data) { + if (data.length == 0) return false; + + int lengthToCheck = Math.min(data.length, 10000); + int ascii = 0; + + for (int i = 0; i < lengthToCheck; i++) { + byte b = data[i]; + if (b < 0x9) return true; + + // TAB, newline/line feed, carriage return + if (b == 0x9 || b == 0xA || b == 0xD) ascii++; + else if (b >= 0x20 && b <= 0x7E) ascii++; + } + + return (double) ascii / lengthToCheck < 0.95; + } +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageValidator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java index bb2d8b3c553..e9edbbc767c 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageValidator.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java @@ -1,5 +1,5 @@ // Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.controller.application; +package com.yahoo.vespa.hosted.controller.application.pkg; import com.yahoo.config.application.api.DeploymentInstanceSpec; import com.yahoo.config.application.api.DeploymentSpec; @@ -14,6 +14,7 @@ import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.application.EndpointId; import com.yahoo.vespa.hosted.controller.deployment.DeploymentSteps; import java.time.Instant; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java new file mode 100644 index 00000000000..8b4791c6b1b --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java @@ -0,0 +1,246 @@ +/* + * Line based variant of Apache commons-text StringComparator + * https://github.com/apache/commons-text/blob/3b1a0a5a47ee9fa2b36f99ca28e2e1d367a10a11/src/main/java/org/apache/commons/text/diff/StringsComparator.java + */ + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.yahoo.vespa.hosted.controller.application.pkg; + +import com.yahoo.collections.Pair; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * <p> + * It is guaranteed that the comparisons will always be done as + * {@code o1.equals(o2)} where {@code o1} belongs to the first + * sequence and {@code o2} belongs to the second sequence. This can + * be important if subclassing is used for some elements in the first + * sequence and the {@code equals} method is specialized. + * </p> + * <p> + * Comparison can be seen from two points of view: either as giving the smallest + * modification allowing to transform the first sequence into the second one, or + * as giving the longest sequence which is a subsequence of both initial + * sequences. The {@code equals} method is used to compare objects, so any + * object can be put into sequences. Modifications include deleting, inserting + * or keeping one object, starting from the beginning of the first sequence. + * </p> + * <p> + * This class implements the comparison algorithm, which is the very efficient + * algorithm from Eugene W. Myers + * <a href="http://www.cis.upenn.edu/~bcpierce/courses/dd/papers/diff.ps"> + * An O(ND) Difference Algorithm and Its Variations</a>. 
+ */ +public class LinesComparator { + + private final List<String> left; + private final List<String> right; + private final int[] vDown; + private final int[] vUp; + + private LinesComparator(List<String> left, List<String> right) { + this.left = left; + this.right = right; + + int size = left.size() + right.size() + 2; + vDown = new int[size]; + vUp = new int[size]; + } + + private void buildScript(int start1, int end1, int start2, int end2, List<Pair<LineOperation, String>> result) { + Snake middle = getMiddleSnake(start1, end1, start2, end2); + + if (middle == null + || middle.start == end1 && middle.diag == end1 - end2 + || middle.end == start1 && middle.diag == start1 - start2) { + + int i = start1; + int j = start2; + while (i < end1 || j < end2) { + if (i < end1 && j < end2 && left.get(i).equals(right.get(j))) { + result.add(new Pair<>(LineOperation.keep, left.get(i))); + ++i; + ++j; + } else { + if (end1 - start1 > end2 - start2) { + result.add(new Pair<>(LineOperation.delete, left.get(i))); + ++i; + } else { + result.add(new Pair<>(LineOperation.insert, right.get(j))); + ++j; + } + } + } + + } else { + buildScript(start1, middle.start, start2, middle.start - middle.diag, result); + for (int i = middle.start; i < middle.end; ++i) { + result.add(new Pair<>(LineOperation.keep, left.get(i))); + } + buildScript(middle.end, end1, middle.end - middle.diag, end2, result); + } + } + + private Snake buildSnake(final int start, final int diag, final int end1, final int end2) { + int end = start; + while (end - diag < end2 && end < end1 && left.get(end).equals(right.get(end - diag))) { + ++end; + } + return new Snake(start, end, diag); + } + + private Snake getMiddleSnake(final int start1, final int end1, final int start2, final int end2) { + final int m = end1 - start1; + final int n = end2 - start2; + if (m == 0 || n == 0) { + return null; + } + + final int delta = m - n; + final int sum = n + m; + final int offset = (sum % 2 == 0 ? 
sum : sum + 1) / 2; + vDown[1 + offset] = start1; + vUp[1 + offset] = end1 + 1; + + for (int d = 0; d <= offset; ++d) { + // Down + for (int k = -d; k <= d; k += 2) { + // First step + + final int i = k + offset; + if (k == -d || k != d && vDown[i - 1] < vDown[i + 1]) { + vDown[i] = vDown[i + 1]; + } else { + vDown[i] = vDown[i - 1] + 1; + } + + int x = vDown[i]; + int y = x - start1 + start2 - k; + + while (x < end1 && y < end2 && left.get(x).equals(right.get(y))) { + vDown[i] = ++x; + ++y; + } + // Second step + if (delta % 2 != 0 && delta - d <= k && k <= delta + d) { + if (vUp[i - delta] <= vDown[i]) { // NOPMD + return buildSnake(vUp[i - delta], k + start1 - start2, end1, end2); + } + } + } + + // Up + for (int k = delta - d; k <= delta + d; k += 2) { + // First step + final int i = k + offset - delta; + if (k == delta - d || k != delta + d && vUp[i + 1] <= vUp[i - 1]) { + vUp[i] = vUp[i + 1] - 1; + } else { + vUp[i] = vUp[i - 1]; + } + + int x = vUp[i] - 1; + int y = x - start1 + start2 - k; + while (x >= start1 && y >= start2 && left.get(x).equals(right.get(y))) { + vUp[i] = x--; + y--; + } + // Second step + if (delta % 2 == 0 && -d <= k && k <= d) { + if (vUp[i] <= vDown[i + delta]) { // NOPMD + return buildSnake(vUp[i], k + start1 - start2, end1, end2); + } + } + } + } + + // this should not happen + throw new RuntimeException("Internal Error"); + } + + private static class Snake { + private final int start; + private final int end; + private final int diag; + + private Snake(int start, int end, int diag) { + this.start = start; + this.end = end; + this.diag = diag; + } + } + + private enum LineOperation { + keep(" "), delete("- "), insert("+ "); + private final String prefix; + LineOperation(String prefix) { + this.prefix = prefix; + } + } + + /** @return line-based diff in unified format. Empty contents are identical. 
*/ + public static Optional<String> diff(List<String> left, List<String> right) { + List<Pair<LineOperation, String>> changes = new ArrayList<>(Math.max(left.size(), right.size())); + new LinesComparator(left, right).buildScript(0, left.size(), 0, right.size(), changes); + + // After we have a list of keep, delete, insert for each line from left and right input, generate a unified + // diff by printing all delete and insert operations with contextLines of keep lines before and after. + // Make sure the change windows are non-overlapping by continuously growing the window + int contextLines = 3; + List<int[]> changeWindows = new ArrayList<>(); + int[] last = null; + for (int i = 0, leftIndex = 0, rightIndex = 0; i < changes.size(); i++) { + if (changes.get(i).getFirst() == LineOperation.keep) { + leftIndex++; + rightIndex++; + continue; + } + + // We found a new change and it is too far away from the previous change to be combined into the same window + if (last == null || i - last[1] > contextLines) { + last = new int[]{Math.max(i - contextLines, 0), Math.min(i + contextLines + 1, changes.size()), Math.max(leftIndex - contextLines, 0), Math.max(rightIndex - contextLines, 0)}; + changeWindows.add(last); + } else // otherwise, extend the previous change window + last[1] = Math.min(i + contextLines + 1, changes.size()); + + if (changes.get(i).getFirst() == LineOperation.delete) leftIndex++; + else rightIndex++; + } + if (changeWindows.isEmpty()) return Optional.empty(); + + StringBuilder sb = new StringBuilder(); + for (int[] changeWindow: changeWindows) { + int start = changeWindow[0], end = changeWindow[1], leftIndex = changeWindow[2], rightIndex = changeWindow[3]; + Map<LineOperation, Long> counts = IntStream.range(start, end) + .mapToObj(i -> changes.get(i).getFirst()) + .collect(Collectors.groupingBy(i -> i, Collectors.counting())); + sb.append("@@ -").append(leftIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.insert, 0L)) + 
.append(" +").append(rightIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.delete, 0L)).append(" @@\n"); + for (int i = start; i < end; i++) + sb.append(changes.get(i).getFirst().prefix).append(changes.get(i).getSecond()).append('\n'); + } + return Optional.of(sb.toString()); + } +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReader.java index 4f01df21430..7ddd0af7a7a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReader.java @@ -1,5 +1,5 @@ // Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.application; +package com.yahoo.vespa.hosted.controller.application.pkg; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -10,6 +10,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Predicate; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; @@ -23,16 +24,15 @@ public class ZipStreamReader { private final List<ZipEntryWithContent> entries = new ArrayList<>(); private final int maxEntrySizeInBytes; - public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) { + public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes, boolean throwIfEntryExceedsMaxSize) { this.maxEntrySizeInBytes = maxEntrySizeInBytes; try (ZipInputStream zipInput = new ZipInputStream(input)) { ZipEntry zipEntry; while (null != (zipEntry = zipInput.getNextEntry())) { if 
(!entryNameMatcher.test(requireName(zipEntry.getName()))) continue; - entries.add(new ZipEntryWithContent(zipEntry, readContent(zipInput))); + entries.add(readContent(zipEntry, zipInput, throwIfEntryExceedsMaxSize)); } - } catch (IOException e) { throw new UncheckedIOException("IO error reading zip content", e); } @@ -59,7 +59,7 @@ public class ZipStreamReader { } } - private byte[] readContent(ZipInputStream zipInput) { + private ZipEntryWithContent readContent(ZipEntry zipEntry, ZipInputStream zipInput, boolean throwIfEntryExceedsMaxSize) { try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) { byte[] buffer = new byte[2048]; int read; @@ -67,12 +67,15 @@ public class ZipStreamReader { while ( -1 != (read = zipInput.read(buffer))) { size += read; if (size > maxEntrySizeInBytes) { - throw new IllegalArgumentException("Entry in zip content exceeded size limit of " + - maxEntrySizeInBytes + " bytes"); - } - bis.write(buffer, 0, read); + if (throwIfEntryExceedsMaxSize) throw new IllegalArgumentException( + "Entry in zip content exceeded size limit of " + maxEntrySizeInBytes + " bytes"); + } else bis.write(buffer, 0, read); } - return bis.toByteArray(); + + boolean hasContent = size <= maxEntrySizeInBytes; + return new ZipEntryWithContent(zipEntry, + Optional.of(bis).filter(__ -> hasContent).map(ByteArrayOutputStream::toByteArray), + size); } catch (IOException e) { throw new UncheckedIOException("Failed reading from zipped content", e); } @@ -96,16 +99,19 @@ public class ZipStreamReader { public static class ZipEntryWithContent { private final ZipEntry zipEntry; - private final byte[] content; + private final Optional<byte[]> content; + private final long size; - public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) { + public ZipEntryWithContent(ZipEntry zipEntry, Optional<byte[]> content, long size) { this.zipEntry = zipEntry; this.content = content; + this.size = size; } public ZipEntry zipEntry() { return zipEntry; } - public byte[] content() { 
return content; } - + public byte[] contentOrThrow() { return content.orElseThrow(); } + public Optional<byte[]> content() { return content; } + public long size() { return size; } } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java index 986e89d03f5..eda6051ed07 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java @@ -41,7 +41,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentFailureMails; import com.yahoo.vespa.hosted.controller.api.integration.organization.Mail; import com.yahoo.vespa.hosted.controller.application.ActivateResult; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.Endpoint; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java index b622fc0bd75..da5282d8e93 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java @@ -12,7 +12,6 @@ import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.Instance; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import 
com.yahoo.vespa.hosted.controller.api.integration.LogEntry; -import com.yahoo.vespa.hosted.controller.api.integration.configserver.NotFoundException; import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; @@ -22,13 +21,13 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; import com.yahoo.vespa.hosted.controller.application.ApplicationList; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff; import com.yahoo.vespa.hosted.controller.persistence.BufferedLogStore; import com.yahoo.vespa.hosted.controller.persistence.CuratorDb; -import java.net.URI; import java.security.cert.X509Certificate; import java.time.Duration; import java.time.Instant; @@ -48,7 +47,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.UnaryOperator; import java.util.logging.Level; -import java.util.stream.Collectors; import java.util.stream.Stream; import static com.google.common.collect.ImmutableList.copyOf; @@ -369,7 +367,8 @@ public class JobController { List<Lock> locks = new ArrayList<>(); try { // Ensure no step is still running before we finish the run — report depends transitively on all the other steps. 
- for (Step step : report.allPrerequisites(run(id).get().steps().keySet())) + Run unlockedRun = run(id).get(); + for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.add(curator.lock(id.application(), id.type(), step)); locked(id, run -> { // Store the modified run after it has been written to history, in case the latter fails. @@ -400,6 +399,20 @@ public class JobController { metric.jobFinished(run.id().job(), finishedRun.status()); return finishedRun; }); + + DeploymentId deploymentId = new DeploymentId(unlockedRun.id().application(), unlockedRun.id().job().type().zone(controller.system())); + (unlockedRun.versions().targetApplication().isDeployedDirectly() ? + Stream.of(unlockedRun.id().type()) : + JobType.allIn(controller.system()).stream().filter(jobType -> !jobType.environment().isManuallyDeployed())) + .flatMap(jobType -> controller.jobController().runs(unlockedRun.id().application(), jobType).values().stream()) + .mapToLong(run -> run.versions().targetApplication().buildNumber().orElse(Integer.MAX_VALUE)) + .min() + .ifPresent(oldestBuild -> { + if (unlockedRun.versions().targetApplication().isDeployedDirectly()) + controller.applications().applicationStore().pruneDevDiffs(deploymentId, oldestBuild); + else + controller.applications().applicationStore().pruneDiffs(deploymentId.applicationId().tenant(), deploymentId.applicationId().application(), oldestBuild); + }); } finally { for (Lock lock : locks) @@ -425,12 +438,19 @@ public class JobController { applicationPackage.compileVersion(), applicationPackage.buildTime(), sourceUrl, - revision.map(SourceRevision::commit))); + revision.map(SourceRevision::commit), + false)); + byte[] diff = application.get().latestVersion() + .map(v -> v.buildNumber().getAsLong()) + .flatMap(prevBuild -> controller.applications().applicationStore().find(id.tenant(), id.application(), prevBuild)) + .map(prevApplication -> ApplicationPackageDiff.diff(new ApplicationPackage(prevApplication), 
applicationPackage)) + .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); controller.applications().applicationStore().put(id.tenant(), id.application(), version.get(), - applicationPackage.zippedContent()); + applicationPackage.zippedContent(), + diff); controller.applications().applicationStore().putTester(id.tenant(), id.application(), version.get(), @@ -480,16 +500,26 @@ public class JobController { controller.applications().store(application); }); - last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); + DeploymentId deploymentId = new DeploymentId(id, type.zone(controller.system())); + Optional<Run> lastRun = last(id, type); + lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); + + long build = 1 + lastRun.map(run -> run.versions().targetApplication().buildNumber().orElse(0)).orElse(0L); + ApplicationVersion version = ApplicationVersion.from(Optional.empty(), build, Optional.empty(), Optional.empty(), + Optional.empty(), Optional.empty(), Optional.empty(), true); + + byte[] diff = lastRun.map(run -> run.versions().targetApplication()) + .map(prevVersion -> ApplicationPackageDiff.diff(new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)), applicationPackage)) + .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { - controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent()); + controller.applications().applicationStore().putDev(deploymentId, version, applicationPackage.zippedContent(), diff); start(id, type, new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(controller.applications()::lastCompatibleVersion) .orElseGet(controller::readSystemVersion)), - ApplicationVersion.unknown, + version, 
Optional.empty(), Optional.empty()), false, @@ -558,7 +588,7 @@ public class JobController { application.get().productionDeployments().values().stream() .flatMap(List::stream) .map(Deployment::applicationVersion) - .filter(version -> ! version.isUnknown()) + .filter(version -> ! version.isUnknown() && ! version.isDeployedDirectly()) .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong())) .ifPresent(oldestDeployed -> { controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java index a69af024b96..0039ce2320e 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java @@ -1,13 +1,14 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.maintenance; +import com.yahoo.component.Version; import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.Instance; -import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.application.Deployment; +import com.yahoo.vespa.hosted.controller.deployment.Run; import com.yahoo.vespa.hosted.controller.deployment.Versions; import com.yahoo.yolean.Exceptions; @@ -32,7 +33,8 @@ public class DeploymentUpgrader extends ControllerMaintainer { protected double maintain() { AtomicInteger attempts = new AtomicInteger(); AtomicInteger failures = new AtomicInteger(); - Versions target = new Versions(controller().readSystemVersion(), ApplicationVersion.unknown, Optional.empty(), Optional.empty()); + Version systemVersion = controller().readSystemVersion(); + for (Application application : controller().applications().readable()) for (Instance instance : application.instances().values()) for (Deployment deployment : instance.deployments().values()) @@ -40,8 +42,11 @@ public class DeploymentUpgrader extends ControllerMaintainer { attempts.incrementAndGet(); JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get()); if ( ! deployment.zone().environment().isManuallyDeployed()) continue; + + Run last = controller().jobController().last(job).get(); + Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty()); if ( ! 
deployment.version().isBefore(target.targetPlatform())) continue; - if ( controller().clock().instant().isBefore(controller().jobController().last(job).get().start().plus(Duration.ofDays(1)))) continue; + if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue; if ( ! isLikelyNightFor(job)) continue; log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone()); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java index 6b509e82dba..53e29df597d 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainer.java @@ -6,7 +6,9 @@ import com.yahoo.vespa.hosted.controller.Application; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.api.integration.user.Roles; import com.yahoo.vespa.hosted.controller.api.integration.user.UserManagement; +import com.yahoo.vespa.hosted.controller.api.role.ApplicationRole; import com.yahoo.vespa.hosted.controller.api.role.Role; +import com.yahoo.vespa.hosted.controller.api.role.TenantRole; import java.time.Duration; import java.util.List; @@ -57,6 +59,7 @@ public class UserManagementMaintainer extends ControllerMaintainer { .collect(Collectors.toList()); return userManagement.listRoles().stream() + .filter(role -> role instanceof TenantRole || role instanceof ApplicationRole) .filter(role -> !tenantRoles.contains(role) && !applicationRoles.contains(role)) .collect(Collectors.toList()); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java 
b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java index 26fb4be04af..a5db6a152dd 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java @@ -107,6 +107,7 @@ public class ApplicationSerializer { private static final String branchField = "branchField"; private static final String commitField = "commitField"; private static final String authorEmailField = "authorEmailField"; + private static final String deployedDirectlyField = "deployedDirectly"; private static final String compileVersionField = "compileVersion"; private static final String buildTimeField = "buildTime"; private static final String sourceUrlField = "sourceUrl"; @@ -228,6 +229,7 @@ public class ApplicationSerializer { applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url)); applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit)); + object.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly()); } private void toSlime(SourceRevision sourceRevision, Cursor object) { @@ -422,7 +424,11 @@ public class ApplicationSerializer { Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField)); Optional<String> commit = SlimeUtils.optionalString(object.field(commitField)); - return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit); + // TODO (freva): Simplify once this has rolled out everywhere + Inspector deployedDirectlyInspector = object.field(deployedDirectlyField); + boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool(); + + return new 
ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java index 8ffa4823ead..b4a580a1562 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java @@ -89,6 +89,7 @@ class RunSerializer { private static final String branchField = "branch"; private static final String commitField = "commit"; private static final String authorEmailField = "authorEmail"; + private static final String deployedDirectlyField = "deployedDirectly"; private static final String compileVersionField = "compileVersion"; private static final String buildTimeField = "buildTime"; private static final String sourceUrlField = "sourceUrl"; @@ -175,8 +176,12 @@ class RunSerializer { Optional<String> sourceUrl = SlimeUtils.optionalString(versionObject.field(sourceUrlField)); Optional<String> commit = SlimeUtils.optionalString(versionObject.field(commitField)); + // TODO (freva): Simplify once this has rolled out everywhere + Inspector deployedDirectlyInspector = versionObject.field(deployedDirectlyField); + boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool(); + return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, - compileVersion, buildTime, sourceUrl, commit); + compileVersion, buildTime, sourceUrl, commit, deployedDirectly); } // Don't change this — introduce a separate array instead. 
@@ -259,6 +264,7 @@ class RunSerializer { applicationVersion.buildTime().ifPresent(time -> versionsObject.setLong(buildTimeField, time.toEpochMilli())); applicationVersion.sourceUrl().ifPresent(url -> versionsObject.setString(sourceUrlField, url)); applicationVersion.commit().ifPresent(commit -> versionsObject.setString(commitField, commit)); + versionsObject.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly()); } // Don't change this - introduce a separate array with new values if needed. diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java index 60f9ab8dc2a..22bd3c9d062 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java @@ -25,6 +25,7 @@ import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; import com.yahoo.io.IOUtils; +import com.yahoo.restapi.ByteArrayResponse; import com.yahoo.restapi.ErrorResponse; import com.yahoo.restapi.MessageResponse; import com.yahoo.restapi.Path; @@ -76,7 +77,7 @@ import com.yahoo.vespa.hosted.controller.api.role.Role; import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition; import com.yahoo.vespa.hosted.controller.api.role.SecurityContext; import com.yahoo.vespa.hosted.controller.application.ActivateResult; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.AssignedRotation; import com.yahoo.vespa.hosted.controller.application.Change; import 
com.yahoo.vespa.hosted.controller.application.Deployment; @@ -245,6 +246,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); + if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request); @@ -255,6 +257,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), 
Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); + if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); @@ -592,13 +595,20 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { throw new IllegalArgumentException("Only manually deployed zones have dev packages"); ZoneId zone = type.zone(controller.system()); - byte[] applicationPackage = controller.applications().applicationStore().getDev(id, zone); + ApplicationVersion version = controller.jobController().last(id, type).get().versions().targetApplication(); + byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), version); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } + private HttpResponse devApplicationPackageDiff(RunId runId) { + DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone(controller.system())); + return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) + .map(ByteArrayResponse::new) + .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); + } + private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); - var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value()); long buildNumber; var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> { @@ -628,6 +638,13 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { return new ZipResponse(filename, applicationPackage.get()); } + private HttpResponse applicationPackageDiff(String tenant, String application, String number) { + TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); + return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) + .map(ByteArrayResponse::new) + .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); + } + private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); @@ -2085,10 +2102,20 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } + Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); 
+ int artifactEntries = artifactsCursor.entries(); + if (artifactEntries == 0) { + throw new IllegalArgumentException("Missing or empty 'artifacts'"); + } + Slime dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); + Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); + for (int i = 0; i < artifactEntries; i++) { + dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); + } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java index 260fc9628e9..3fd221abe10 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java @@ -20,7 +20,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary; @@ -40,12 +40,10 @@ import java.time.Instant; import java.time.format.TextStyle; import java.util.Arrays; import 
java.util.Collection; -import java.util.Comparator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; -import java.util.stream.Collectors; import java.util.stream.Stream; import static com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy.canary; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java index 6f5b1f30592..c116aa43c0d 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java @@ -3,22 +3,36 @@ package com.yahoo.vespa.hosted.controller.restapi.horizon; import com.google.inject.Inject; import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.TenantName; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; import com.yahoo.restapi.ErrorResponse; import com.yahoo.restapi.Path; +import com.yahoo.vespa.flags.BooleanFlag; +import com.yahoo.vespa.flags.FetchVector; +import com.yahoo.vespa.flags.FlagSource; +import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController; +import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient; import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonResponse; +import com.yahoo.vespa.hosted.controller.api.role.Role; +import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition; import com.yahoo.vespa.hosted.controller.api.role.SecurityContext; +import 
com.yahoo.vespa.hosted.controller.api.role.TenantRole; import com.yahoo.yolean.Exceptions; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.EnumSet; +import java.util.HashSet; import java.util.Optional; +import java.util.Set; import java.util.logging.Level; +import java.util.stream.Collectors; /** * Proxies metrics requests from Horizon UI @@ -27,22 +41,36 @@ import java.util.logging.Level; */ public class HorizonApiHandler extends LoggingRequestHandler { + private final BillingController billingController; private final SystemName systemName; private final HorizonClient client; + private final BooleanFlag enabledHorizonDashboard; + + private static final EnumSet<RoleDefinition> operatorRoleDefinitions = + EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter); @Inject - public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) { + public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, FlagSource flagSource) { super(parentCtx); + this.billingController = controller.serviceRegistry().billingController(); this.systemName = controller.system(); this.client = controller.serviceRegistry().horizonClient(); + this.enabledHorizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource); } @Override public HttpResponse handle(HttpRequest request) { + var roles = getRoles(request); + var operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains); + var authorizedTenants = getAuthorizedTenants(roles); + + if (!operator && authorizedTenants.isEmpty()) + return ErrorResponse.forbidden("No tenant with enabled metrics view"); + try { switch (request.getMethod()) { case GET: return get(request); - case POST: return post(request); + case POST: return post(request, authorizedTenants, operator); case PUT: return put(request); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' 
is not supported"); } @@ -65,10 +93,10 @@ public class HorizonApiHandler extends LoggingRequestHandler { return ErrorResponse.notFoundError("Nothing at " + path); } - private HttpResponse post(HttpRequest request) { + private HttpResponse post(HttpRequest request, Set<TenantName> authorizedTenants, boolean operator) { Path path = new Path(request.getUri()); - if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, true); - if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, false); + if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, authorizedTenants, operator, true); + if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, authorizedTenants, operator, false); return ErrorResponse.notFoundError("Nothing at " + path); } @@ -78,10 +106,9 @@ public class HorizonApiHandler extends LoggingRequestHandler { return ErrorResponse.notFoundError("Nothing at " + path); } - private HttpResponse tsdbQuery(HttpRequest request, boolean isMetricQuery) { - SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); + private HttpResponse tsdbQuery(HttpRequest request, Set<TenantName> authorizedTenants, boolean operator, boolean isMetricQuery) { try { - byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), securityContext.roles(), systemName); + byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), authorizedTenants, operator, systemName); return new JsonInputStreamResponse(isMetricQuery ? 
client.getMetrics(data) : client.getMetaData(data)); } catch (TsdbQueryRewriter.UnauthorizedException e) { return ErrorResponse.forbidden("Access denied"); @@ -90,11 +117,22 @@ public class HorizonApiHandler extends LoggingRequestHandler { } } - private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) { - return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) - .filter(clazz::isInstance) - .map(clazz::cast) - .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); + private static Set<Role> getRoles(HttpRequest request) { + return Optional.ofNullable(request.getJDiscRequest().context().get(SecurityContext.ATTRIBUTE_NAME)) + .filter(SecurityContext.class::isInstance) + .map(SecurityContext.class::cast) + .map(SecurityContext::roles) + .orElseThrow(() -> new IllegalArgumentException("Attribute '" + SecurityContext.ATTRIBUTE_NAME + "' was not set on request")); + } + + private Set<TenantName> getAuthorizedTenants(Set<Role> roles) { + var horizonEnabled = roles.stream() + .filter(TenantRole.class::isInstance) + .map(role -> ((TenantRole) role).tenant()) + .filter(tenant -> enabledHorizonDashboard.with(FetchVector.Dimension.TENANT_ID, tenant.value()).value()) + .collect(Collectors.toList()); + + return new HashSet<>(billingController.tenantsWithPlan(horizonEnabled, PlanId.from("pay-as-you-go"))); } private static class JsonInputStreamResponse extends HttpResponse { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java index e034be46063..3e20584dbac 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java @@ -7,12 +7,8 
@@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.TenantName; -import com.yahoo.vespa.hosted.controller.api.role.Role; -import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition; -import com.yahoo.vespa.hosted.controller.api.role.TenantRole; import java.io.IOException; -import java.util.EnumSet; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -23,20 +19,8 @@ import java.util.stream.Collectors; public class TsdbQueryRewriter { private static final ObjectMapper mapper = new ObjectMapper(); - private static final EnumSet<RoleDefinition> operatorRoleDefinitions = - EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter); - - public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException { - boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains); - - // Anyone with any tenant relation can view metrics for apps within those tenants - Set<TenantName> authorizedTenants = roles.stream() - .filter(TenantRole.class::isInstance) - .map(role -> ((TenantRole) role).tenant()) - .collect(Collectors.toUnmodifiableSet()); - if (!operator && authorizedTenants.isEmpty()) - throw new UnauthorizedException(); + public static byte[] rewrite(byte[] data, Set<TenantName> authorizedTenants, boolean operator, SystemName systemName) throws IOException { JsonNode root = mapper.readTree(data); requireLegalType(root); getField(root, "executionGraph", ArrayNode.class) diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java index 7b0a2c9d6d6..0ecc8ac81df 100644 --- 
a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java @@ -22,6 +22,7 @@ import com.yahoo.text.Text; import com.yahoo.vespa.flags.BooleanFlag; import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.FlagSource; +import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.IntFlag; import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.hosted.controller.Controller; @@ -71,6 +72,7 @@ public class UserApiHandler extends LoggingRequestHandler { private final Controller controller; private final BooleanFlag enable_public_signup_flow; private final IntFlag maxTrialTenants; + private final BooleanFlag enabledHorizonDashboard; @Inject public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource) { @@ -79,6 +81,7 @@ public class UserApiHandler extends LoggingRequestHandler { this.controller = controller; this.enable_public_signup_flow = PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.bindTo(flagSource); this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource); + this.enabledHorizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource); } @Override @@ -184,6 +187,10 @@ public class UserApiHandler extends LoggingRequestHandler { Cursor tenantRolesObject = tenantObject.setArray("roles"); tenantRolesByTenantName.getOrDefault(tenant, List.of()) .forEach(role -> tenantRolesObject.addString(role.definition().name())); + if (controller.system().isPublic()) { + tenantObject.setBool(enabledHorizonDashboard.id().toString(), + enabledHorizonDashboard.with(FetchVector.Dimension.TENANT_ID, tenant.value()).value()); + } }); if (!operatorRoles.isEmpty()) { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java index 
86b9370150c..433a976358e 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java @@ -26,7 +26,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.Record; import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData; import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName; import com.yahoo.vespa.hosted.controller.api.integration.dns.WeightedAliasTarget; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.application.Endpoint; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java new file mode 100644 index 00000000000..b2aba721a6f --- /dev/null +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java @@ -0,0 +1,128 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.application.pkg; + +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Map; +import java.util.zip.Deflater; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff.diff; + +import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff.diffAgainstEmpty; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.Assert.assertEquals; + +/** + * @author freva + */ +public class ApplicationPackageDiffTest { + private static final ApplicationPackage app1 = applicationPackage(Map.of("file1", "contents of the\nfirst file", "dir/myfile", "Second file", "dir/binary", "øøøø")); + private static final ApplicationPackage app2 = applicationPackage(Map.of("file1", "updated contents\nof the\nfirst file\nafter some changes", "dir/myfile2", "Second file", "dir/binary", "øøøø")); + + @Test + public void no_diff() { + assertEquals("No diff\n", new String(diff(app1, app1))); + } + + @Test + public void diff_against_empty() { + assertEquals("--- dir/binary\n" + + "Diff skipped: File is binary (new file -> 8B)\n" + + "\n" + + "--- dir/myfile\n" + + "@@ -1,0 +1,1 @@\n" + + "+ Second file\n" + + "\n" + + "--- file1\n" + + "@@ -1,0 +1,2 @@\n" + + "+ contents of the\n" + + "+ first file\n" + + "\n", new String(diffAgainstEmpty(app1))); + } + + @Test + public void full_diff() { + // Even though dir/binary is binary file, we can see they are identical, so it should not print "Diff skipped" + assertEquals("--- dir/myfile\n" + + "@@ -1,1 +1,0 @@\n" + + "- Second file\n" + + "\n" + + "--- dir/myfile2\n" + + "@@ -1,0 +1,1 @@\n" + + "+ Second file\n" + + "\n" + + "--- file1\n" + + "@@ -1,2 +1,4 @@\n" + + "+ updated contents\n" + + "+ of the\n" + + "- contents of the\n" + + " first file\n" + + "+ after some 
changes\n" + + "\n", new String(diff(app1, app2))); + } + + @Test + public void skips_diff_for_too_large_files() { + assertEquals("--- dir/myfile\n" + + "@@ -1,1 +1,0 @@\n" + + "- Second file\n" + + "\n" + + "--- dir/myfile2\n" + + "@@ -1,0 +1,1 @@\n" + + "+ Second file\n" + + "\n" + + "--- file1\n" + + "Diff skipped: File too large (26B -> 53B)\n" + + "\n", new String(diff(app1, app2, 12, 1000, 1000))); + } + + @Test + public void skips_diff_if_file_diff_is_too_large() { + assertEquals("--- dir/myfile\n" + + "@@ -1,1 +1,0 @@\n" + + "- Second file\n" + + "\n" + + "--- dir/myfile2\n" + + "@@ -1,0 +1,1 @@\n" + + "+ Second file\n" + + "\n" + + "--- file1\n" + + "Diff skipped: Diff too large (96B)\n" + + "\n", new String(diff(app1, app2, 1000, 50, 1000))); + } + + @Test + public void skips_diff_if_total_diff_is_too_large() { + assertEquals("--- dir/myfile\n" + + "@@ -1,1 +1,0 @@\n" + + "- Second file\n" + + "\n" + + "--- dir/myfile2\n" + + "Diff skipped: Total diff size >20B)\n" + + "\n" + + "--- file1\n" + + "Diff skipped: Total diff size >20B)\n" + + "\n", new String(diff(app1, app2, 1000, 1000, 20))); + } + + private static ApplicationPackage applicationPackage(Map<String, String> files) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ZipOutputStream out = new ZipOutputStream(baos)) { + out.setLevel(Deflater.NO_COMPRESSION); // This is for testing purposes so we skip compression for performance + for (Map.Entry<String, String> file : files.entrySet()) { + ZipEntry entry = new ZipEntry(file.getKey()); + out.putNextEntry(entry); + out.write(file.getValue().getBytes(UTF_8)); + out.closeEntry(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + return new ApplicationPackage(baos.toByteArray()); + } +} diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageTest.java 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageTest.java index 1849be9b6bd..75e00e3434c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageTest.java @@ -1,5 +1,5 @@ // Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.application; +package com.yahoo.vespa.hosted.controller.application.pkg; import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.config.application.api.ValidationId; @@ -109,10 +109,10 @@ public class ApplicationPackageTest { } private static Map<String, String> unzip(byte[] zip) { - return new ZipStreamReader(new ByteArrayInputStream(zip), __ -> true, 1 << 10) + return new ZipStreamReader(new ByteArrayInputStream(zip), __ -> true, 1 << 10, true) .entries().stream() .collect(Collectors.toMap(entry -> entry.zipEntry().getName(), - entry -> new String(entry.content(), UTF_8))); + entry -> new String(entry.contentOrThrow(), UTF_8))); } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java new file mode 100644 index 00000000000..92137094f62 --- /dev/null +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java @@ -0,0 +1,112 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.application.pkg; + +import org.junit.Test; + +import java.util.Optional; +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; + +public class LinesComparatorTest { + private static final String text1 = "This part of the\n" + + "document has stayed the\n" + + "same from version to\n" + + "version. It shouldn't\n" + + "be shown if it doesn't\n" + + "change. Otherwise, that\n" + + "would not be helping to\n" + + "compress the size of the\n" + + "changes.\n" + + "\n" + + "This paragraph contains\n" + + "text that is outdated.\n" + + "It will be deleted in the\n" + + "near future.\n" + + "\n" + + "It is important to spell\n" + + "check this dokument. On\n" + + "the other hand, a\n" + + "misspelled word isn't\n" + + "the end of the world.\n" + + "Nothing in the rest of\n" + + "this paragraph needs to\n" + + "be changed. Things can\n" + + "be added after it."; + private static final String text2 = "This is an important\n" + + "notice! It should\n" + + "therefore be located at\n" + + "the beginning of this\n" + + "document!\n" + + "\n" + + "This part of the\n" + + "document has stayed the\n" + + "same from version to\n" + + "version. It shouldn't\n" + + "be shown if it doesn't\n" + + "change. Otherwise, that\n" + + "would not be helping to\n" + + "compress the size of the\n" + + "changes.\n" + + "\n" + + "It is important to spell\n" + + "check this document. On\n" + + "the other hand, a\n" + + "misspelled word isn't\n" + + "the end of the world.\n" + + "Nothing in the rest of\n" + + "this paragraph needs to\n" + + "be changed. 
Things can\n" + + "be added after it.\n" + + "\n" + + "This paragraph contains\n" + + "important new additions\n" + + "to this document."; + + @Test + public void diff_test() { + assertDiff(null, "", ""); + assertDiff(null, text1, text1); + assertDiff(text1.lines().map(line -> "- " + line).collect(Collectors.joining("\n", "@@ -1,24 +1,0 @@\n", "\n")), text1, ""); + assertDiff(text1.lines().map(line -> "+ " + line).collect(Collectors.joining("\n", "@@ -1,0 +1,24 @@\n", "\n")), "", text1); + assertDiff("@@ -1,3 +1,9 @@\n" + + "+ This is an important\n" + + "+ notice! It should\n" + + "+ therefore be located at\n" + + "+ the beginning of this\n" + + "+ document!\n" + + "+ \n" + + " This part of the\n" + + " document has stayed the\n" + + " same from version to\n" + + "@@ -7,14 +13,9 @@\n" + + " would not be helping to\n" + + " compress the size of the\n" + + " changes.\n" + + "- \n" + + "- This paragraph contains\n" + + "- text that is outdated.\n" + + "- It will be deleted in the\n" + + "- near future.\n" + + " \n" + + " It is important to spell\n" + + "+ check this document. On\n" + + "- check this dokument. On\n" + + " the other hand, a\n" + + " misspelled word isn't\n" + + " the end of the world.\n" + + "@@ -22,3 +23,7 @@\n" + + " this paragraph needs to\n" + + " be changed. Things can\n" + + " be added after it.\n" + + "+ \n" + + "+ This paragraph contains\n" + + "+ important new additions\n" + + "+ to this document.\n", text1, text2); + } + + private static void assertDiff(String expected, String left, String right) { + assertEquals(Optional.ofNullable(expected), + LinesComparator.diff(left.lines().collect(Collectors.toList()), right.lines().collect(Collectors.toList()))); + } +}
\ No newline at end of file diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReaderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReaderTest.java index abd234f0fa4..afbd232f01c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReaderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReaderTest.java @@ -1,5 +1,5 @@ // Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.controller.application; +package com.yahoo.vespa.hosted.controller.application.pkg; import com.yahoo.security.KeyAlgorithm; import com.yahoo.security.KeyUtils; @@ -38,15 +38,15 @@ public class ZipStreamReaderTest { public void test_size_limit() { Map<String, String> entries = Map.of("foo.xml", "foobar"); try { - new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 1); + new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 1, true); fail("Expected exception"); } catch (IllegalArgumentException ignored) {} entries = Map.of("foo.xml", "foobar", "foo.jar", "0".repeat(100) // File not extracted and thus not subject to size limit ); - ZipStreamReader reader = new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals,10); - byte[] extracted = reader.entries().get(0).content(); + ZipStreamReader reader = new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 10, true); + byte[] extracted = reader.entries().get(0).contentOrThrow(); assertEquals("foobar", new String(extracted, StandardCharsets.UTF_8)); } @@ -65,7 +65,7 @@ public class ZipStreamReaderTest { ); tests.forEach((name, expectException) -> { try { - new ZipStreamReader(new ByteArrayInputStream(zip(Map.of(name, "foo"))), name::equals, 1024); + new 
ZipStreamReader(new ByteArrayInputStream(zip(Map.of(name, "foo"))), name::equals, 1024, true); assertFalse("Expected exception for '" + name + "'", expectException); } catch (IllegalArgumentException ignored) { assertTrue("Unexpected exception for '" + name + "'", expectException); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java index b234ab4960b..73b1489b088 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java @@ -10,7 +10,7 @@ import com.yahoo.security.SignatureAlgorithm; import com.yahoo.security.X509CertificateBuilder; import com.yahoo.security.X509CertificateUtils; import com.yahoo.text.Text; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import javax.security.auth.x500.X500Principal; import java.io.ByteArrayOutputStream; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java index 420d0be04ac..c225dcbe49d 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java @@ -28,7 +28,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud; import 
com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.EndpointId; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; @@ -492,8 +492,8 @@ public class DeploymentContext { Run run = jobs.last(job) .filter(r -> r.id().type() == job.type()) .orElseThrow(() -> new AssertionError(job.type() + " is not among the active: " + jobs.active())); - assertFalse(run.id() + " should not have failed yet", run.hasFailed()); - assertFalse(run.id() + " should not have ended yet", run.hasEnded()); + assertFalse(run.id() + " should not have failed yet: " + run, run.hasFailed()); + assertFalse(run.id() + " should not have ended yet: " + run, run.hasEnded()); return run; } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java index d4c9425fc03..ad32266e290 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java @@ -8,7 +8,7 @@ import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import 
com.yahoo.vespa.hosted.controller.versions.VespaVersion; import org.junit.Assert; @@ -61,7 +61,7 @@ import static org.junit.Assert.assertTrue; */ public class DeploymentTriggerTest { - private DeploymentTester tester = new DeploymentTester(); + private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { @@ -1110,15 +1110,16 @@ public class DeploymentTriggerTest { // System and staging tests both require unknown versions, and are broken. tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsCentral1, "user", false); app.runJob(productionCdUsCentral1) - .abortJob(systemTest) - .abortJob(stagingTest) + .jobAborted(systemTest) + .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsCentral1, cdPackage); app.submit(cdPackage); - app.runJob(systemTest); + app.jobAborted(systemTest) + .runJob(systemTest); // Staging test requires unknown initial version, and is broken. 
tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsCentral1, "user", false); app.runJob(productionCdUsCentral1) diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java index d685c6a2354..780d2d226f3 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java @@ -25,7 +25,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.config.ControllerConfig; import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java index 59e2b6c04d8..521ff160a05 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java @@ -5,11 +5,11 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.InstanceName; import 
com.yahoo.config.provision.TenantName; -import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationStore; import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId; +import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import java.time.Instant; import java.util.Map; @@ -30,7 +30,9 @@ public class ApplicationStoreMock implements ApplicationStore { private static final byte[] tombstone = new byte[0]; private final Map<ApplicationId, Map<ApplicationVersion, byte[]>> store = new ConcurrentHashMap<>(); - private final Map<ApplicationId, Map<ZoneId, byte[]>> devStore = new ConcurrentHashMap<>(); + private final Map<DeploymentId, byte[]> devStore = new ConcurrentHashMap<>(); + private final Map<ApplicationId, Map<Long, byte[]>> diffs = new ConcurrentHashMap<>(); + private final Map<DeploymentId, Map<Long, byte[]>> devDiffs = new ConcurrentHashMap<>(); private final Map<ApplicationId, NavigableMap<Instant, byte[]>> meta = new ConcurrentHashMap<>(); private final Map<DeploymentId, NavigableMap<Instant, byte[]>> metaManual = new ConcurrentHashMap<>(); @@ -43,15 +45,30 @@ public class ApplicationStoreMock implements ApplicationStore { } @Override - public byte[] get(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion) { - byte[] bytes = store.get(appId(tenant, application)).get(applicationVersion); + public byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion) { + if (applicationVersion.isDeployedDirectly()) + return requireNonNull(devStore.get(deploymentId)); + + TenantAndApplicationId tenantAndApplicationId = TenantAndApplicationId.from(deploymentId.applicationId()); + byte[] bytes = store.get(appId(tenantAndApplicationId.tenant(), 
tenantAndApplicationId.application())).get(applicationVersion); if (bytes == null) - throw new IllegalArgumentException("No application package found for " + tenant + "." + application + + throw new IllegalArgumentException("No application package found for " + tenantAndApplicationId + " with version " + applicationVersion.id()); return bytes; } @Override + public Optional<byte[]> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber) { + return Optional.ofNullable(diffs.get(appId(tenantName, applicationName))).map(map -> map.get(buildNumber)); + } + + @Override + public void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber) { + Optional.ofNullable(diffs.get(appId(tenantName, applicationName))) + .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber)); + } + + @Override public Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber) { return store.getOrDefault(appId(tenant, application), Map.of()).entrySet().stream() .filter(kv -> kv.getKey().buildNumber().orElse(Long.MIN_VALUE) == buildNumber) @@ -60,9 +77,10 @@ public class ApplicationStoreMock implements ApplicationStore { } @Override - public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage) { - store.putIfAbsent(appId(tenant, application), new ConcurrentHashMap<>()); - store.get(appId(tenant, application)).put(applicationVersion, applicationPackage); + public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) { + store.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(applicationVersion, applicationPackage); + applicationVersion.buildNumber().ifPresent(buildNumber -> + diffs.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override @@ -83,8 
+101,8 @@ public class ApplicationStoreMock implements ApplicationStore { @Override public void putTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] testerPackage) { - store.putIfAbsent(testerId(tenant, application), new ConcurrentHashMap<>()); - store.get(testerId(tenant, application)).put(applicationVersion, testerPackage); + store.computeIfAbsent(testerId(tenant, application), key -> new ConcurrentHashMap<>()) + .put(applicationVersion, testerPackage); } @Override @@ -99,14 +117,21 @@ public class ApplicationStoreMock implements ApplicationStore { } @Override - public void putDev(ApplicationId application, ZoneId zone, byte[] applicationPackage) { - devStore.putIfAbsent(application, new ConcurrentHashMap<>()); - devStore.get(application).put(zone, applicationPackage); + public Optional<byte[]> getDevDiff(DeploymentId deploymentId, long buildNumber) { + return Optional.ofNullable(devDiffs.get(deploymentId)).map(map -> map.get(buildNumber)); + } + + @Override + public void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber) { + Optional.ofNullable(devDiffs.get(deploymentId)) + .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber)); } @Override - public byte[] getDev(ApplicationId application, ZoneId zone) { - return requireNonNull(devStore.get(application).get(zone)); + public void putDev(DeploymentId deploymentId, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) { + devStore.put(deploymentId, applicationPackage); + applicationVersion.buildNumber().ifPresent(buildNumber -> + devDiffs.computeIfAbsent(deploymentId, __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java index e18542108c0..cbdf5dcb075 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java @@ -40,7 +40,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud; import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter; import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.serviceview.bindings.ApplicationView; import com.yahoo.vespa.serviceview.bindings.ClusterView; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java index d7934f08fee..bd50078bc87 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java @@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket; import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext; 
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java index 31f8aaf9e2d..b27e30e9ed6 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java @@ -6,7 +6,7 @@ import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.Instance; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java index af2c0c6b08d..7ee1349f6d5 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java @@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.controller.LockedTenant; import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact; import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId; import com.yahoo.vespa.hosted.controller.api.integration.stubs.LoggingDeploymentIssues; -import 
com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java index 8083b847c0b..0cb7b192b8b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java @@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java index 7a8f775e8b1..ec33c8a7048 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java +++ 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java @@ -5,21 +5,13 @@ import com.yahoo.component.Version; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.zone.ZoneId; -import com.yahoo.vespa.hosted.controller.Instance; -import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; -import com.yahoo.vespa.hosted.controller.application.Deployment; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; -import com.yahoo.vespa.hosted.controller.deployment.Run; -import com.yahoo.vespa.hosted.controller.deployment.RunStatus; import org.junit.Test; import java.time.Duration; import java.time.Instant; -import java.time.temporal.ChronoUnit; -import java.time.temporal.TemporalUnit; -import java.util.Optional; import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.devUsEast1; import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java index 023c5671b60..7970e20f6c7 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java @@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationV import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; import 
com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.deployment.JobController; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java index a9046a8e060..2fb5aee354b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java @@ -15,7 +15,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java index 00d39788e38..9e8842243c0 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java @@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.component.Version; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java index 5ef64b460b9..a255a6c37d8 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java @@ -12,7 +12,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; 
import com.yahoo.vespa.hosted.controller.integration.MetricsMock; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java index df93efab893..3fd9afe5445 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java @@ -5,7 +5,7 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java index f4e688379e5..3c99034761c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java @@ -6,7 +6,7 @@ import com.yahoo.config.provision.TenantName; import com.yahoo.vespa.hosted.controller.Instance; import com.yahoo.vespa.hosted.controller.api.integration.aws.MockRoleService; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import 
com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import org.hamcrest.Matchers; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java index 29d77c38b1a..f2f71b9a5b8 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java @@ -7,7 +7,7 @@ import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics; import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryMock; import org.junit.Test; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java index 91b5dc232e5..1dd4ef24c9e 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java @@ -6,7 +6,7 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.zone.ZoneId; import 
com.yahoo.test.ManualClock; import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java index 08be2266b2e..b658e86a575 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UserManagementMaintainerTest.java @@ -6,6 +6,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockUserManagement; import com.yahoo.vespa.hosted.controller.api.integration.user.Roles; import com.yahoo.vespa.hosted.controller.api.integration.user.UserManagement; +import com.yahoo.vespa.hosted.controller.api.role.Role; import org.junit.Test; import java.time.Duration; @@ -33,6 +34,7 @@ public class UserManagementMaintainerTest { Roles.tenantRoles(tenant).forEach(userManagement::createRole); Roles.applicationRoles(tenant, app).forEach(userManagement::createRole); Roles.tenantRoles(deletedTenant).forEach(userManagement::createRole); + userManagement.createRole(Role.hostedSupporter()); var expectedRoles = Roles.tenantRoles(deletedTenant); var actualRoles = userManagementMaintainer.findLeftoverRoles(); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java index 37a173ffc37..2ae45c75cae 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java @@ -88,7 +88,8 @@ public class ApplicationSerializerTest { Optional.of(Version.fromString("1.2.3")), Optional.of(Instant.ofEpochMilli(666)), Optional.empty(), - Optional.of("best commit")); + Optional.of("best commit"), + true); assertEquals("https://github/org/repo/tree/commit1", applicationVersion1.sourceUrl().get()); ApplicationVersion applicationVersion2 = ApplicationVersion diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java index 03a050db74e..7623a02f6af 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java @@ -82,13 +82,16 @@ public class RunSerializerTest { assertEquals(running, run.status()); assertEquals(3, run.lastTestLogEntry()); assertEquals(new Version(1, 2, 3), run.versions().targetPlatform()); - ApplicationVersion applicationVersion = ApplicationVersion.from(new SourceRevision("git@github.com:user/repo.git", - "master", - "f00bad"), + ApplicationVersion applicationVersion = ApplicationVersion.from(Optional.of(new SourceRevision("git@github.com:user/repo.git", + "master", + "f00bad")), 123, - "a@b", - Version.fromString("6.3.1"), - Instant.ofEpochMilli(100)); + Optional.of("a@b"), + Optional.of(Version.fromString("6.3.1")), + Optional.of(Instant.ofEpochMilli(100)), + Optional.empty(), + Optional.empty(), + true); assertEquals(applicationVersion, 
run.versions().targetApplication()); assertEquals(applicationVersion.authorEmail(), run.versions().targetApplication().authorEmail()); assertEquals(applicationVersion.buildTime(), run.versions().targetApplication().buildTime()); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json index 0f40dd27664..7b9131a38dd 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json @@ -40,6 +40,7 @@ "branch": "master", "commit": "f00bad", "build": 123, + "deployedDirectly": true, "authorEmail": "a@b", "compileVersion": "6.3.1", "buildTime": 100, @@ -48,7 +49,8 @@ "repository": "git@github.com:user/repo.git", "branch": "master", "commit": "badb17", - "build": 122 + "build": 122, + "deployedDirectly": false } } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java index ed6fc776e8d..bf2cd039afd 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java @@ -52,7 +52,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringData; import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceAllocation; import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot; import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import 
com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Change; import com.yahoo.vespa.hosted.controller.application.Deployment; import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics; @@ -300,6 +300,13 @@ public class ApplicationApiTest extends ControllerContainerTest { .data(createApplicationDeployData(applicationPackageInstance1, false)), new File("deployment-job-accepted-2.json")); + tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR), + (response) -> assertTrue(response.getBodyAsString(), + response.getBodyAsString().contains("--- search-definitions/test.sd\n" + + "@@ -1,0 +1,1 @@\n" + + "+ search test { }\n")), + 200); + // DELETE a dev deployment is allowed under user instance for tenant admins tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE) .userIdentity(USER_ID), @@ -614,13 +621,14 @@ public class ApplicationApiTest extends ControllerContainerTest { // POST to request a service dump tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", POST) .userIdentity(HOSTED_VESPA_OPERATOR) - .data("{\"configId\":\"default/container.1\"}"), + .data("{\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"), "{\"message\":\"Request created\"}"); // GET to get status of service dump tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/node/host-tenant1:application1:instance1-prod.us-central-1/service-dump", GET) .userIdentity(HOSTED_VESPA_OPERATOR), - "{\"createdMillis\":" + tester.controller().clock().millis() + 
",\"configId\":\"default/container.1\"}"); + "{\"createdMillis\":" + tester.controller().clock().millis() + ",\"configId\":\"default/container.1\"" + + ",\"artifacts\":[\"jvm-dump\"]}"); // POST a 'restart application' command tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-central-1/instance/instance1/restart", POST) @@ -756,6 +764,12 @@ public class ApplicationApiTest extends ControllerContainerTest { .data(createApplicationSubmissionData(packageWithService, 123)), "{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}"); + tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR), + (response) -> assertTrue(response.getBodyAsString(), + response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" + + "- <deployment version='1.0' >\n")), + 200); + // GET last submitted application package tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR), (response) -> { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java index 72295497c03..de5ae466039 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java @@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServ import 
com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import org.junit.Test; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json index 2813bd0ab7d..abe3d4100d9 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json @@ -502,7 +502,9 @@ "status": "success", "versions": { "targetPlatform": "6.1.0", - "targetApplication": {} + "targetApplication": { + "build": 1 + } }, "steps": [ { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json index acde58f2a28..dce73ad56cd 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json @@ -8,7 +8,9 @@ "status": "success", "versions": { "targetPlatform": "7.1.0", - "targetApplication": {} + "targetApplication": { + "build": 1 + } }, "steps": 
[ { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json index e3beb371acd..92a823bdfc2 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json @@ -5,7 +5,9 @@ "runs": [ { "versions": { - "targetApplication": {}, + "targetApplication": { + "build": 1 + }, "targetPlatform": "6.1.0" }, "start": 0, diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json index 72411d155c7..3ef993c6589 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json @@ -6,7 +6,7 @@ { "at": 0, "type": "info", - "message": "Deploying platform version 6.1 and application version unknown ..." + "message": "Deploying platform version 6.1 and application version 1.0.1 ..." 
}, { "at": 0, diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json index 9a742a9b176..7ebc2d24fe9 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json @@ -11,7 +11,9 @@ "status": "success", "versions": { "targetPlatform": "6.1.0", - "targetApplication": {} + "targetApplication": { + "build": 1 + } }, "steps": [ { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json index 2601937faee..f8aba54356b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json @@ -5,7 +5,9 @@ "runs": [ { "versions": { - "targetApplication": {}, + "targetApplication": { + "build": 1 + }, "targetPlatform": "7.1.0" }, "start": 14503000, diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java index 63474ebb7c9..438da66e6e8 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java @@ -3,7 +3,7 @@ package com.yahoo.vespa.hosted.controller.restapi.deployment; import 
com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.restapi.ContainerTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java index cd24ec170c5..460afb102d9 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java @@ -7,7 +7,7 @@ import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.ControllerTester; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; import com.yahoo.vespa.hosted.controller.restapi.ContainerTester; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java new file mode 100644 index 00000000000..8e51f8210c7 --- /dev/null +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java @@ 
-0,0 +1,67 @@ +package com.yahoo.vespa.hosted.controller.restapi.horizon; + +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.flags.Flags; +import com.yahoo.vespa.flags.InMemoryFlagSource; +import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; +import com.yahoo.vespa.hosted.controller.api.role.Role; +import com.yahoo.vespa.hosted.controller.restapi.ContainerTester; +import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerCloudTest; +import org.junit.Test; + +import java.util.Set; + +/** + * @author olaa + */ +public class HorizonApiTest extends ControllerContainerCloudTest { + + @Test + public void only_operators_and_flag_enabled_tenants_allowed() { + ContainerTester tester = new ContainerTester(container, ""); + TenantName tenantName = TenantName.defaultName(); + + tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders") + .roles(Set.of(Role.hostedOperator())), + "", 200); + + tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders") + .roles(Set.of(Role.reader(tenantName))), + "{\"error-code\":\"FORBIDDEN\",\"message\":\"No tenant with enabled metrics view\"}", 403); + + ((InMemoryFlagSource) tester.controller().flagSource()) + .withBooleanFlag(Flags.ENABLED_HORIZON_DASHBOARD.id(), true); + + tester.controller().serviceRegistry().billingController().setPlan(tenantName, PlanId.from("pay-as-you-go"), true); + + tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders") + .roles(Set.of(Role.reader(tenantName))), + "", 200); + } + + @Override + protected SystemName system() { + return SystemName.PublicCd; + } + + @Override + protected String variablePartXml() { + return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" + + " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" + + + " <handler 
id=\"com.yahoo.vespa.hosted.controller.restapi.horizon.HorizonApiHandler\" bundle=\"controller-server\">\n" + + " <binding>http://*/horizon/v1/*</binding>\n" + + " </handler>\n" + + + " <http>\n" + + " <server id='default' port='8080' />\n" + + " <filtering>\n" + + " <request-chain id='default'>\n" + + " <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" + + " <binding>http://*/*</binding>\n" + + " </request-chain>\n" + + " </filtering>\n" + + " </http>\n"; + } +}
\ No newline at end of file diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java index ab9d50f8eae..d31d9c28c6c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java @@ -22,33 +22,28 @@ public class TsdbQueryRewriterTest { @Test public void rewrites_query() throws IOException { - assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2"))); + assertRewrite("filters-complex.json", "filters-complex.expected.json", Set.of(TenantName.from("tenant2")), false); assertRewrite("filter-in-execution-graph.json", "filter-in-execution-graph.expected.json", - Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3"))); + Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false); assertRewrite("filter-in-execution-graph.json", "filter-in-execution-graph.expected.operator.json", - Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator()); + Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), true); assertRewrite("no-filters.json", "no-filters.expected.json", - Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3"))); + Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false); assertRewrite("filters-meta-query.json", "filters-meta-query.expected.json", - Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3"))); + Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false); } - @Test(expected = TsdbQueryRewriter.UnauthorizedException.class) - public void 
throws_if_no_roles() throws IOException { - assertRewrite("filters-complex.json", "filters-complex.expected.json"); - } - - private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException { + private static void assertRewrite(String initialFilename, String expectedFilename, Set<TenantName> tenants, boolean operator) throws IOException { byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename)); - data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public); + data = TsdbQueryRewriter.rewrite(data, tenants, operator, SystemName.Public); ByteArrayOutputStream baos = new ByteArrayOutputStream(); new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data)); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json index ae3dc68d9e3..e883993cb53 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json @@ -14,17 +14,20 @@ "roles": [ "developer", "reader" - ] + ], + "enabled-horizon-dashboard":false }, "tenant1": { "roles": [ "administrator" - ] + ], + "enabled-horizon-dashboard":false }, "tenant2": { "roles": [ "developer" - ] + ], + "enabled-horizon-dashboard":false } } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java index aa9775f1d43..1b86a0930d4 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java +++ 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java @@ -5,7 +5,7 @@ import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.zone.RoutingMethod; import com.yahoo.vespa.hosted.controller.ControllerTester; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.AssignedRotation; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java index 79b564eee52..3f805ba2916 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java @@ -24,7 +24,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; import com.yahoo.vespa.hosted.controller.api.integration.dns.Record; import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData; import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.Endpoint; import com.yahoo.vespa.hosted.controller.application.EndpointId; import com.yahoo.vespa.hosted.controller.application.EndpointList; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java index e3fef9f9066..a1108d5f03c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java @@ -12,7 +12,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType; -import com.yahoo.vespa.hosted.controller.application.ApplicationPackage; +import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SlobrokPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SlobrokPolicy.java index 1ffce622d78..7648da6dca4 100644 --- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SlobrokPolicy.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SlobrokPolicy.java @@ -33,7 +33,7 @@ public abstract class SlobrokPolicy implements DocumentProtocolRoutingPolicy { } } catch (InterruptedException e) { } - firstTry = true; + firstTry = false; } } } diff --git a/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp b/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp index 566ab931837..99aeb390544 100644 --- a/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp +++ b/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp @@ -8,6 +8,7 @@ using vespalib::demangle; using llvm::object::ObjectFile; +using SymbolType = 
llvm::object::SymbolRef::Type; namespace vespalib::eval { @@ -15,6 +16,11 @@ namespace { void my_local_test_symbol() {} +bool symbol_is_data_or_function(SymbolType type) +{ + return ((type == SymbolType::ST_Data) || (type == SymbolType::ST_Function)); +} + } // <unnamed> vespalib::string addr_to_symbol(const void *addr) { @@ -42,7 +48,10 @@ vespalib::string addr_to_symbol(const void *addr) { for (const auto &symbol: symbols) { auto sym_name = symbol.getName(); auto sym_addr = symbol.getAddress(); - if (sym_name && sym_addr && (*sym_addr == offset)) { + auto sym_type = symbol.getType(); + if (sym_name && sym_addr && sym_type && + symbol_is_data_or_function(*sym_type) && + (*sym_addr == offset)) { return demangle(sym_name->str().c_str()); } } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java index e0c75b99d83..a7c17516161 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java @@ -13,9 +13,7 @@ import java.util.Optional; import java.util.TreeMap; import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID; -import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE; import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME; -import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE; import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID; import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION; import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID; @@ -130,10 +128,10 @@ public class Flags { "Takes effect at redeployment", ZONE_ID, APPLICATION_ID); - public static final UnboundBooleanFlag ORCHESTRATE_MISSING_PROXIES = defineFeatureFlag( - "orchestrate-missing-proxies", true, - List.of("hakonhall"), "2021-08-05", "2021-10-05", - "Whether the Orchestrator can assume any missing proxy services are down.", + public static final UnboundIntFlag 
NUM_DEPLOY_HELPER_THREADS = defineIntFlag( + "num-model-builder-threads", 0, + List.of("balder"), "2021-09-09", "2021-10-01", + "Number of threads used for speeding up building of models.", "Takes effect on first (re)start of config server"); public static final UnboundBooleanFlag GROUP_PERMANENT_SUSPENSION = defineFeatureFlag( @@ -149,9 +147,15 @@ public class Flags { "Allow migrating an unencrypted data partition to being encrypted when (de)provisioned.", "Takes effect on next host-admin tick."); + public static final UnboundBooleanFlag NEW_SPARE_DISKS = defineFeatureFlag( + "new-spare-disks", false, + List.of("hakonhall"), "2021-09-08", "2021-11-08", + "Use a new algorithm to calculate the spare disks of a host.", + "Takes effect on first run of DiskTask, typically after host-admin restart/upgrade."); + public static final UnboundBooleanFlag ENABLE_FEED_BLOCK_IN_DISTRIBUTOR = defineFeatureFlag( "enable-feed-block-in-distributor", true, - List.of("geirst"), "2021-01-27", "2021-09-01", + List.of("geirst"), "2021-01-27", "2021-11-01", "Enables blocking of feed in the distributor if resource usage is above limit on at least one content node", "Takes effect at redeployment", ZONE_ID, APPLICATION_ID); @@ -206,7 +210,7 @@ public class Flags { APPLICATION_ID); public static final UnboundBooleanFlag DISTRIBUTE_EXTERNAL_RANK_EXPRESSION = defineFeatureFlag( - "distribute-external-rank-expression", false, + "distribute-external-rank-expression", true, List.of("baldersheim"), "2021-05-27", "2021-09-15", "Whether to use distributed external rank expression files by filedistribution", "Takes effect on next internal redeployment", @@ -239,13 +243,6 @@ public class Flags { "Takes effect on next deployment through controller", APPLICATION_ID); - public static final UnboundBooleanFlag USE_REAL_RESOURCES = defineFeatureFlag( - "use-real-resources", false, - List.of("freva"), "2021-09-08", "2021-10-01", - "Whether host-admin should use real resources (rather than advertised 
resources) when creating linux container and reporting metrics", - "Takes effect on next host-admin tick", - CLUSTER_TYPE, NODE_TYPE); - public static final UnboundListFlag<String> DEFER_APPLICATION_ENCRYPTION = defineListFlag( "defer-application-encryption", List.of(), String.class, List.of("mpolden", "hakonhall"), "2021-06-23", "2021-10-01", @@ -286,6 +283,14 @@ public class Flags { "Takes effect immediately", ZONE_ID, APPLICATION_ID); + public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag( + "enabled-horizon-dashboard", false, + List.of("olaa"), "2021-09-13", "2021-12-31", + "Enable Horizon dashboard", + "Takes effect immediately", + TENANT_ID + ); + /** WARNING: public for testing: All flags should be defined in {@link Flags}. */ public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners, String createdAt, String expiresAt, String description, diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java index 8691b569a95..35d498b9f77 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsConsumers.java @@ -7,6 +7,7 @@ import ai.vespa.metricsproxy.metric.model.ConsumerId; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -30,7 +31,7 @@ public class MetricsConsumers { private final Map<ConsumerId, List<ConfiguredMetric>> consumerMetrics; // All consumers for each metric (more useful than the opposite map). 
- private final Map<ConfiguredMetric, List<ConsumerId>> consumersByMetric; + private final Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric; public MetricsConsumers(ConsumersConfig config) { consumerMetrics = config.consumer().stream().collect( @@ -47,7 +48,7 @@ public class MetricsConsumers { return consumerMetrics.get(consumer); } - public Map<ConfiguredMetric, List<ConsumerId>> getConsumersByMetric() { + public Map<ConfiguredMetric, Set<ConsumerId>> getConsumersByMetric() { return consumersByMetric; } @@ -59,14 +60,16 @@ public class MetricsConsumers { * Helper function to create mapping from metric to consumers. * TODO: consider reversing the mapping in metrics-consumers.def instead: metric{}.consumer[] */ - private static Map<ConfiguredMetric, List<ConsumerId>> + private static Map<ConfiguredMetric, Set<ConsumerId>> createConsumersByMetric(Map<ConsumerId, List<ConfiguredMetric>> metricsByConsumer) { - Map<ConfiguredMetric, List<ConsumerId>> consumersByMetric = new LinkedHashMap<>(); + Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric = new LinkedHashMap<>(); metricsByConsumer.forEach( (consumer, metrics) -> metrics.forEach( - metric -> consumersByMetric.computeIfAbsent(metric, unused -> new ArrayList<>()) + metric -> consumersByMetric.computeIfAbsent(metric, unused -> new HashSet<>()) .add(consumer))); - return Collections.unmodifiableMap(consumersByMetric); + Map<ConfiguredMetric, Set<ConsumerId>> unmodifiableConsumersByMetric = new LinkedHashMap<>(); + consumersByMetric.forEach((configuredMetric, consumerIds) -> unmodifiableConsumersByMetric.put(configuredMetric, Set.copyOf(consumerIds))); + return Collections.unmodifiableMap(unmodifiableConsumersByMetric); } public static <T, K, U> Collector<T, ?, Map<K, U>> toUnmodifiableLinkedMap(Function<? super T, ? 
extends K> keyMapper, diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java index 0c83d8f73c4..a8ef79d827e 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java @@ -12,6 +12,7 @@ import ai.vespa.metricsproxy.metric.model.Dimension; import ai.vespa.metricsproxy.metric.model.DimensionId; import ai.vespa.metricsproxy.metric.model.MetricId; import ai.vespa.metricsproxy.metric.model.MetricsPacket; +import ai.vespa.metricsproxy.service.MetricsParser; import ai.vespa.metricsproxy.service.VespaService; import java.util.ArrayList; @@ -68,29 +69,27 @@ public class VespaMetrics { public List<MetricsPacket.Builder> getMetrics(List<VespaService> services) { List<MetricsPacket.Builder> metricsPackets = new ArrayList<>(); - Map<ConfiguredMetric, List<ConsumerId>> consumersByMetric = metricsConsumers.getConsumersByMetric(); + Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric = metricsConsumers.getConsumersByMetric(); for (VespaService service : services) { // One metrics packet for system metrics Optional<MetricsPacket.Builder> systemCheck = getSystemMetrics(service); systemCheck.ifPresent(metricsPackets::add); - Metrics allServiceMetrics = service.getMetrics(); + MetricAggregator aggregator = new MetricAggregator(service.getDimensions()); + GetServiceMetricsConsumer metricsConsumer = new GetServiceMetricsConsumer(consumersByMetric, aggregator); + service.consumeMetrics(metricsConsumer); - if (! allServiceMetrics.getMetrics().isEmpty()) { - Metrics serviceMetrics = getServiceMetrics(allServiceMetrics, consumersByMetric); + if (! aggregator.getAggregated().isEmpty()) { // One metrics packet per set of metrics that share the same dimensions+consumers - // TODO: Move aggregation into MetricsPacket itself? 
- Map<AggregationKey, List<Metric>> aggregatedMetrics = aggregateMetrics(service.getDimensions(), serviceMetrics); - - aggregatedMetrics.forEach((aggregationKey, metrics) -> { + aggregator.getAggregated().forEach((aggregationKey, metrics) -> { MetricsPacket.Builder builder = new MetricsPacket.Builder(service.getMonitoringName()) .putMetrics(metrics) .putDimension(METRIC_TYPE_DIMENSION_ID, "standard") .putDimension(INSTANCE_DIMENSION_ID, service.getInstanceName()) .putDimensions(aggregationKey.getDimensions()); - setMetaInfo(builder, serviceMetrics.getTimeStamp()); + setMetaInfo(builder, metrics.get(0).getTimeStamp()); builder.addConsumers(aggregationKey.getConsumers()); metricsPackets.add(builder); }); @@ -119,15 +118,20 @@ public class VespaMetrics { * In order to include a metric, it must exist in the given map of metric to consumers. * Each returned metric will contain a collection of consumers that it should be routed to. */ - private Metrics getServiceMetrics(Metrics allServiceMetrics, Map<ConfiguredMetric, List<ConsumerId>> consumersByMetric) { - Metrics configuredServiceMetrics = new Metrics(); - configuredServiceMetrics.setTimeStamp(getMostRecentTimestamp(allServiceMetrics)); - for (Metric candidate : allServiceMetrics.getMetrics()) { + private class GetServiceMetricsConsumer implements MetricsParser.Consumer { + private final MetricAggregator aggregator; + private final Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric; + GetServiceMetricsConsumer(Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric, MetricAggregator aggregator) { + this.consumersByMetric = consumersByMetric; + this.aggregator = aggregator; + } + + @Override + public void consume(Metric candidate) { getConfiguredMetrics(candidate.getName(), consumersByMetric.keySet()).forEach( - configuredMetric -> configuredServiceMetrics.add( + configuredMetric -> aggregator.aggregate( metricWithConfigProperties(candidate, configuredMetric, consumersByMetric))); } - return 
configuredServiceMetrics; } private Map<DimensionId, String> extractDimensions(Map<DimensionId, String> dimensions, List<Dimension> configuredDimensions) { @@ -139,21 +143,17 @@ public class VespaMetrics { return dimensions; } - private Set<ConsumerId> extractConsumers(List<ConsumerId> configuredConsumers) { + private Set<ConsumerId> extractConsumers(Set<ConsumerId> configuredConsumers) { Set<ConsumerId> consumers = Collections.emptySet(); if (configuredConsumers != null) { - if ( configuredConsumers.size() == 1) { - consumers = Collections.singleton(configuredConsumers.get(0)); - } else if (configuredConsumers.size() > 1){ - consumers = Set.copyOf(configuredConsumers); - } + consumers = configuredConsumers; } return consumers; } private Metric metricWithConfigProperties(Metric candidate, ConfiguredMetric configuredMetric, - Map<ConfiguredMetric, List<ConsumerId>> consumersByMetric) { + Map<ConfiguredMetric, Set<ConsumerId>> consumersByMetric) { Metric metric = candidate.clone(); metric.setDimensions(extractDimensions(candidate.getDimensions(), configuredMetric.dimension())); metric.setConsumers(extractConsumers(consumersByMetric.get(configuredMetric))); @@ -188,35 +188,27 @@ public class VespaMetrics { return Optional.of(builder); } - private long getMostRecentTimestamp(Metrics metrics) { - long mostRecentTimestamp = 0L; - for (Metric metric : metrics.getMetrics()) { - if (metric.getTimeStamp() > mostRecentTimestamp) { - mostRecentTimestamp = metric.getTimeStamp(); - } + private static class MetricAggregator { + private final Map<AggregationKey, List<Metric>> aggregated = new HashMap<>(); + private final Map<DimensionId, String> serviceDimensions; + MetricAggregator(Map<DimensionId, String> serviceDimensions) { + this.serviceDimensions = serviceDimensions; } - return mostRecentTimestamp; - } - - private Map<AggregationKey, List<Metric>> aggregateMetrics(Map<DimensionId, String> serviceDimensions, - Metrics metrics) { - Map<AggregationKey, List<Metric>> 
aggregatedMetrics = new HashMap<>(); - - for (Metric metric : metrics.getMetrics() ) { + Map<AggregationKey, List<Metric>> getAggregated() { return aggregated; } + void aggregate(Metric metric) { Map<DimensionId, String> mergedDimensions = new LinkedHashMap<>(); mergedDimensions.putAll(metric.getDimensions()); mergedDimensions.putAll(serviceDimensions); AggregationKey aggregationKey = new AggregationKey(mergedDimensions, metric.getConsumers()); - if (aggregatedMetrics.containsKey(aggregationKey)) { - aggregatedMetrics.get(aggregationKey).add(metric); + if (aggregated.containsKey(aggregationKey)) { + aggregated.get(aggregationKey).add(metric); } else { List<Metric> ml = new ArrayList<>(); ml.add(metric); - aggregatedMetrics.put(aggregationKey, ml); + aggregated.put(aggregationKey, ml); } } - return aggregatedMetrics; } private List<ConfiguredMetric> getMetricDefinitions(ConsumerId consumer) { @@ -232,75 +224,100 @@ public class VespaMetrics { .statusMessage("Data collected successfully"); } + private class MetricStringBuilder implements MetricsParser.Consumer { + private final StringBuilder sb = new StringBuilder(); + private VespaService service; + @Override + public void consume(Metric metric) { + MetricId key = metric.getName(); + MetricId alias = key; + + boolean isForwarded = false; + for (ConfiguredMetric metricConsumer : getMetricDefinitions(vespaMetricsConsumerId)) { + if (metricConsumer.id().equals(key)) { + alias = metricConsumer.outputname(); + isForwarded = true; + } + } + if (isForwarded) { + sb.append(formatter.format(service, alias.id, metric.getValue())).append(" "); + } + } + + @Override + public String toString() { + return sb.toString(); + } + } /** * Returns a string representation of metrics for the given services; * a space separated list of key=value. 
*/ public String getMetricsAsString(List<VespaService> services) { - StringBuilder b = new StringBuilder(); - for (VespaService s : services) { - for (Metric metric : s.getMetrics().getMetrics()) { - MetricId key = metric.getName(); - MetricId alias = key; - - boolean isForwarded = false; - for (ConfiguredMetric metricConsumer : getMetricDefinitions(vespaMetricsConsumerId)) { - if (metricConsumer.id().equals(key)) { - alias = metricConsumer.outputname(); - isForwarded = true; + MetricStringBuilder msb = new MetricStringBuilder(); + for (VespaService service : services) { + msb.service = service; + service.consumeMetrics(msb); + } + return msb.toString(); + } + + private class MetricNamesBuilder implements MetricsParser.Consumer { + private final StringBuilder bufferOn = new StringBuilder(); + private final StringBuilder bufferOff = new StringBuilder(); + private final ConsumerId consumer; + MetricNamesBuilder(ConsumerId consumer) { + this.consumer = consumer; + } + @Override + public void consume(Metric m) { + String description = m.getDescription(); + MetricId alias = MetricId.empty; + boolean isForwarded = false; + + for (ConfiguredMetric metric : getMetricDefinitions(consumer)) { + if (metric.id().equals(m.getName())) { + alias = metric.outputname(); + isForwarded = true; + if (description.isEmpty()) { + description = metric.description(); } } - if (isForwarded) { - b.append(formatter.format(s, alias.id, metric.getValue())).append(" "); - } } + + String message = "OFF"; + StringBuilder buffer = bufferOff; + if (isForwarded) { + buffer = bufferOn; + message = "ON"; + } + buffer.append(m.getName()).append('=').append(message); + if (!description.isEmpty()) { + buffer.append(";description=").append(description); + } + if (!alias.id.isEmpty()) { + buffer.append(";output-name=").append(alias); + } + buffer.append(','); } - return b.toString(); - } + @Override + public String toString() { + return bufferOn.append(bufferOff).toString(); + } + } /** * Get all metric 
names for the given services * * @return String representation */ public String getMetricNames(List<VespaService> services, ConsumerId consumer) { - StringBuilder bufferOn = new StringBuilder(); - StringBuilder bufferOff = new StringBuilder(); - for (VespaService s : services) { - - for (Metric m : s.getMetrics().getMetrics()) { - String description = m.getDescription(); - MetricId alias = MetricId.empty; - boolean isForwarded = false; - - for (ConfiguredMetric metric : getMetricDefinitions(consumer)) { - if (metric.id().equals(m.getName())) { - alias = metric.outputname(); - isForwarded = true; - if (description.isEmpty()) { - description = metric.description(); - } - } - } - - String message = "OFF"; - StringBuilder buffer = bufferOff; - if (isForwarded) { - buffer = bufferOn; - message = "ON"; - } - buffer.append(m.getName()).append('=').append(message); - if (!description.isEmpty()) { - buffer.append(";description=").append(description); - } - if (!alias.id.isEmpty()) { - buffer.append(";output-name=").append(alias); - } - buffer.append(','); - } + MetricNamesBuilder metricNamesBuilder = new MetricNamesBuilder(consumer); + for (VespaService service : services) { + service.consumeMetrics(metricNamesBuilder); } - return bufferOn.append(bufferOff).toString(); + return metricNamesBuilder.toString(); } } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/DimensionId.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/DimensionId.java index b99f720cb03..621cc382947 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/DimensionId.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/DimensionId.java @@ -1,6 +1,9 @@ // Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.metricsproxy.metric.model; +import com.yahoo.concurrent.CopyOnWriteHashMap; + +import java.util.Map; import java.util.Objects; /** @@ -8,10 +11,13 @@ import java.util.Objects; */ public final class DimensionId { + private static final Map<String, DimensionId> dictionary = new CopyOnWriteHashMap<>(); public final String id; private DimensionId(String id) { this.id = id; } - public static DimensionId toDimensionId(String id) { return new DimensionId(id); } + public static DimensionId toDimensionId(String id) { + return dictionary.computeIfAbsent(id, key -> new DimensionId(key)); + } @Override public boolean equals(Object o) { diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/MetricId.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/MetricId.java index dcee4100f98..564628b9952 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/MetricId.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/MetricId.java @@ -1,6 +1,9 @@ // Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.metricsproxy.metric.model; +import com.yahoo.concurrent.CopyOnWriteHashMap; + +import java.util.Map; import java.util.Objects; /** @@ -8,11 +11,14 @@ import java.util.Objects; */ public class MetricId { + private static final Map<String, MetricId> dictionary = new CopyOnWriteHashMap<>(); public static final MetricId empty = toMetricId(""); public final String id; private MetricId(String id) { this.id = id; } - public static MetricId toMetricId(String id) { return new MetricId(id); } + public static MetricId toMetricId(String id) { + return dictionary.computeIfAbsent(id, key -> new MetricId(key)); + } @Override public boolean equals(Object o) { diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/DummyMetricsFetcher.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/DummyMetricsFetcher.java index b304e5d74d3..e9fbc942e6e 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/DummyMetricsFetcher.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/DummyMetricsFetcher.java @@ -1,8 +1,6 @@ // Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package ai.vespa.metricsproxy.service; -import ai.vespa.metricsproxy.metric.Metrics; - /** * Dummy class used for getting health status for a vespa service that has no HTTP service * for getting metrics @@ -21,7 +19,6 @@ public class DummyMetricsFetcher extends RemoteMetricsFetcher { /** * Connect to remote service over http and fetch metrics */ - public Metrics getMetrics(int fetchCount) { - return new Metrics(); + public void getMetrics(MetricsParser.Consumer consumer, int fetchCount) { } } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java index 48621b9abab..22812c07b78 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java @@ -2,7 +2,6 @@ package ai.vespa.metricsproxy.service; import ai.vespa.metricsproxy.metric.Metric; -import ai.vespa.metricsproxy.metric.Metrics; import ai.vespa.metricsproxy.metric.model.DimensionId; import ai.vespa.metricsproxy.metric.model.MetricId; import com.fasterxml.jackson.core.JsonParser; @@ -25,59 +24,61 @@ import static ai.vespa.metricsproxy.metric.model.DimensionId.toDimensionId; * @author Jo Kristian Bergum */ public class MetricsParser { + public interface Consumer { + void consume(Metric metric); + } private static final ObjectMapper jsonMapper = new ObjectMapper(); - static Metrics parse(String data) throws IOException { - return parse(jsonMapper.createParser(data)); + static void parse(String data, Consumer consumer) throws IOException { + parse(jsonMapper.createParser(data), consumer); } - static Metrics parse(InputStream data) throws IOException { - return parse(jsonMapper.createParser(data)); + static void parse(InputStream data, Consumer consumer) throws IOException { + parse(jsonMapper.createParser(data), consumer); } - private static Metrics parse(JsonParser parser) throws IOException { + 
private static void parse(JsonParser parser, Consumer consumer) throws IOException { if (parser.nextToken() != JsonToken.START_OBJECT) { throw new IOException("Expected start of object, got " + parser.currentToken()); } - Metrics metrics = new Metrics(); for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) { String fieldName = parser.getCurrentName(); JsonToken token = parser.nextToken(); if (fieldName.equals("metrics")) { - metrics = parseMetrics(parser); + parseMetrics(parser, consumer); } else { if (token == JsonToken.START_OBJECT || token == JsonToken.START_ARRAY) { parser.skipChildren(); } } } - return metrics; } - - static private Metrics parseSnapshot(JsonParser parser) throws IOException { + private static long secondsSince1970UTC() { + return System.currentTimeMillis() / 1000L; + } + static private long parseSnapshot(JsonParser parser) throws IOException { if (parser.getCurrentToken() != JsonToken.START_OBJECT) { throw new IOException("Expected start of 'snapshot' object, got " + parser.currentToken()); } - Metrics metrics = new Metrics(); + long timestamp = secondsSince1970UTC(); for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) { String fieldName = parser.getCurrentName(); JsonToken token = parser.nextToken(); if (fieldName.equals("to")) { - long timestamp = parser.getLongValue(); + timestamp = parser.getLongValue(); long now = System.currentTimeMillis() / 1000; timestamp = Metric.adjustTime(timestamp, now); - metrics = new Metrics(timestamp); } else { if (token == JsonToken.START_OBJECT || token == JsonToken.START_ARRAY) { parser.skipChildren(); } } } - return metrics; + return timestamp; } - static private void parseValues(JsonParser parser, Metrics metrics) throws IOException { + static private void parseValues(JsonParser parser, long timestamp, Consumer consumer) throws IOException { if (parser.getCurrentToken() != JsonToken.START_ARRAY) { throw new 
IOException("Expected start of 'metrics:values' array, got " + parser.currentToken()); } @@ -87,34 +88,34 @@ public class MetricsParser { // read everything from this START_OBJECT to the matching END_OBJECT // and return it as a tree model ObjectNode JsonNode value = jsonMapper.readTree(parser); - handleValue(value, metrics.getTimeStamp(), metrics, uniqueDimensions); + handleValue(value, timestamp, consumer, uniqueDimensions); // do whatever you need to do with this object } } - static private Metrics parseMetrics(JsonParser parser) throws IOException { + static private void parseMetrics(JsonParser parser, Consumer consumer) throws IOException { if (parser.getCurrentToken() != JsonToken.START_OBJECT) { throw new IOException("Expected start of 'metrics' object, got " + parser.currentToken()); } - Metrics metrics = new Metrics(); + long timestamp = System.currentTimeMillis() / 1000L; for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) { String fieldName = parser.getCurrentName(); JsonToken token = parser.nextToken(); if (fieldName.equals("snapshot")) { - metrics = parseSnapshot(parser); + timestamp = parseSnapshot(parser); } else if (fieldName.equals("values")) { - parseValues(parser, metrics); + parseValues(parser, timestamp, consumer); } else { if (token == JsonToken.START_OBJECT || token == JsonToken.START_ARRAY) { parser.skipChildren(); } } } - return metrics; } - static private void handleValue(JsonNode metric, long timestamp, Metrics metrics, Map<String, Map<DimensionId, String>> uniqueDimensions) { + static private void handleValue(JsonNode metric, long timestamp, Consumer consumer, + Map<String, Map<DimensionId, String>> uniqueDimensions) { String name = metric.get("name").textValue(); String description = ""; @@ -155,7 +156,7 @@ public class MetricsParser { throw new IllegalArgumentException("Value for aggregator '" + aggregator + "' is not a number"); } String metricName = new 
StringBuilder().append(name).append(".").append(aggregator).toString(); - metrics.add(new Metric(MetricId.toMetricId(metricName), value, timestamp, dim, description)); + consumer.consume(new Metric(MetricId.toMetricId(metricName), value, timestamp, dim, description)); } } } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java index f2cb5c4e8b3..8acaa0fb58e 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java @@ -23,32 +23,25 @@ public class RemoteMetricsFetcher extends HttpMetricFetcher { /** * Connect to remote service over http and fetch metrics */ - public Metrics getMetrics(int fetchCount) { + public void getMetrics(MetricsParser.Consumer consumer, int fetchCount) { try { - return createMetrics(getJson(), fetchCount); + createMetrics(getJson(), consumer, fetchCount); } catch (IOException | InterruptedException | ExecutionException e) { - return new Metrics(); } } - Metrics createMetrics(String data, int fetchCount) { - Metrics remoteMetrics = new Metrics(); + void createMetrics(String data, MetricsParser.Consumer consumer, int fetchCount) { try { - remoteMetrics = MetricsParser.parse(data); + MetricsParser.parse(data, consumer); } catch (Exception e) { handleException(e, data, fetchCount); } - - return remoteMetrics; } - Metrics createMetrics(InputStream data, int fetchCount) { - Metrics remoteMetrics = new Metrics(); + private void createMetrics(InputStream data, MetricsParser.Consumer consumer, int fetchCount) { try { - remoteMetrics = MetricsParser.parse(data); + MetricsParser.parse(data, consumer); } catch (Exception e) { handleException(e, data, fetchCount); } - - return remoteMetrics; } } diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/VespaService.java 
b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/VespaService.java index 9d165b2d5a9..c6e0b202985 100644 --- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/VespaService.java +++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/VespaService.java @@ -2,6 +2,7 @@ package ai.vespa.metricsproxy.service; import ai.vespa.metricsproxy.metric.HealthMetric; +import ai.vespa.metricsproxy.metric.Metric; import ai.vespa.metricsproxy.metric.Metrics; import ai.vespa.metricsproxy.metric.model.DimensionId; import ai.vespa.metricsproxy.metric.model.ServiceId; @@ -134,13 +135,23 @@ public class VespaService implements Comparable<VespaService> { /** * Get the Metrics registered for this service. Metrics are fetched over HTTP * if a metric http port has been defined, otherwise from log file - * - * @return the non-system metrics */ - public Metrics getMetrics() { - Metrics remoteMetrics = remoteMetricsFetcher.getMetrics(metricsFetchCount.get()); + public void consumeMetrics(MetricsParser.Consumer consumer) { + remoteMetricsFetcher.getMetrics(consumer, metricsFetchCount.get()); metricsFetchCount.getAndIncrement(); - return remoteMetrics; + } + + private static class CollectMetrics implements MetricsParser.Consumer { + private final Metrics metrics = new Metrics(); + @Override + public void consume(Metric metric) { + metrics.add(metric); + } + } + public final Metrics getMetrics() { + CollectMetrics collector = new CollectMetrics(); + consumeMetrics(collector); + return collector.metrics; } /** diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DownService.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DownService.java index d39cabcbf93..72277f54d88 100644 --- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DownService.java +++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DownService.java @@ -18,8 +18,7 @@ public class DownService extends VespaService { } @Override - public Metrics 
getMetrics() { - return new Metrics(); + public void consumeMetrics(MetricsParser.Consumer consumer) { } @Override diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DummyService.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DummyService.java index ccbb237ae37..28b4db12855 100644 --- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DummyService.java +++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/DummyService.java @@ -2,7 +2,6 @@ package ai.vespa.metricsproxy.service; import ai.vespa.metricsproxy.metric.Metric; -import ai.vespa.metricsproxy.metric.Metrics; import ai.vespa.metricsproxy.metric.model.MetricId; /** @@ -21,14 +20,10 @@ public class DummyService extends VespaService { } @Override - public Metrics getMetrics() { - Metrics m = new Metrics(); - + public void consumeMetrics(MetricsParser.Consumer consumer) { long timestamp = System.currentTimeMillis() / 1000; - m.add(new Metric(MetricId.toMetricId(METRIC_1), 5 * num + 1, timestamp)); - m.add(new Metric(MetricId.toMetricId(METRIC_2), 1.3 * num + 1.05, timestamp)); - - return m; + consumer.consume(new Metric(MetricId.toMetricId(METRIC_1), 5 * num + 1, timestamp)); + consumer.consume(new Metric(MetricId.toMetricId(METRIC_2), 1.3 * num + 1.05, timestamp)); } } diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/MetricsFetcherTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/MetricsFetcherTest.java index defcf4bfaaa..ed3e8ef3d56 100644 --- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/MetricsFetcherTest.java +++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/service/MetricsFetcherTest.java @@ -2,6 +2,7 @@ package ai.vespa.metricsproxy.service; import ai.vespa.metricsproxy.TestUtil; +import ai.vespa.metricsproxy.metric.Metric; import ai.vespa.metricsproxy.metric.Metrics; import ai.vespa.metricsproxy.metric.model.MetricId; import org.junit.Test; @@ -15,11 +16,24 @@ public class 
MetricsFetcherTest { private static int port = 9; //port number is not used in this test + private class MetricsConsumer implements MetricsParser.Consumer { + Metrics metrics = new Metrics(); + @Override + public void consume(Metric metric) { + metrics.add(metric); + } + } + Metrics fetch(String data) { + RemoteMetricsFetcher fetcher = new RemoteMetricsFetcher(new DummyService(0, "dummy/id/0"), port); + MetricsConsumer consumer = new MetricsConsumer(); + fetcher.createMetrics(data, consumer, 0); + return consumer.metrics; + } + @Test public void testStateFormatMetricsParse() { String jsonData = TestUtil.getFileContents("metrics-state.json"); - RemoteMetricsFetcher fetcher = new RemoteMetricsFetcher(new DummyService(0, "dummy/id/0"), port); - Metrics metrics = fetcher.createMetrics(jsonData, 0); + Metrics metrics = fetch(jsonData); assertThat(metrics.size(), is(10)); assertThat(metrics.getMetric(MetricId.toMetricId("query_hits.count")).getValue().intValue(), is(28)); assertThat(metrics.getMetric(MetricId.toMetricId("queries.rate")).getValue().doubleValue(), is(0.4667)); @@ -29,8 +43,7 @@ public class MetricsFetcherTest { @Test public void testEmptyJson() { String jsonData = "{}"; - RemoteMetricsFetcher fetcher = new RemoteMetricsFetcher(new DummyService(0, "dummy/id/0"), port); - Metrics metrics = fetcher.createMetrics(jsonData, 0); + Metrics metrics = fetch(jsonData); assertThat("Wrong number of metrics", metrics.size(), is(0)); } @@ -39,10 +52,8 @@ public class MetricsFetcherTest { String jsonData; Metrics metrics; - RemoteMetricsFetcher fetcher = new RemoteMetricsFetcher(new DummyService(0, "dummy/id/0"), port); - jsonData = ""; - metrics = fetcher.createMetrics(jsonData, 0); + metrics = fetch(jsonData); assertThat("Wrong number of metrics", metrics.size(), is(0)); jsonData = "{\n" + @@ -51,7 +62,7 @@ public class MetricsFetcherTest { " \"message\" : \"Everything ok here\"\n" + "}\n" + "}"; - metrics = fetcher.createMetrics(jsonData, 0); + metrics = 
fetch(jsonData); assertThat("Wrong number of metrics", metrics.size(), is(0)); jsonData = "{\n" + @@ -80,7 +91,7 @@ public class MetricsFetcherTest { "}\n" + "}"; - metrics = fetcher.createMetrics(jsonData, 0); + metrics = fetch(jsonData); assertThat("Wrong number of metrics", metrics.size(), is(0)); } } diff --git a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java index a306d09b3c1..51d54d719c2 100644 --- a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java +++ b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java @@ -28,7 +28,9 @@ public class OnnxEvaluator { public OnnxEvaluator(String modelPath) { try { environment = OrtEnvironment.getEnvironment(); - session = environment.createSession(modelPath, new OrtSession.SessionOptions()); + OrtSession.SessionOptions options = new OrtSession.SessionOptions(); + options.setOptimizationLevel(OrtSession.SessionOptions.OptLevel.ALL_OPT); + session = environment.createSession(modelPath, options); } catch (OrtException e) { throw new RuntimeException("ONNX Runtime exception", e); } diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java index cf92cbc1e89..0152669ef78 100644 --- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java +++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java @@ -13,7 +13,6 @@ import com.yahoo.tensor.TensorType; import java.io.File; import java.io.IOException; -import java.io.StringReader; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java 
b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java deleted file mode 100644 index fc576df0f09..00000000000 --- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.rankingexpression.importer; - -import com.yahoo.path.Path; - -import java.io.File; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; - -// TODO: Remove this class after November 2018 -public class ImportedModels { - - /** All imported models, indexed by their names */ - private final Map<String, ImportedModel> importedModels; - - /** Create a null imported models */ - public ImportedModels() { - importedModels = Collections.emptyMap(); - } - - public ImportedModels(File modelsDirectory, Collection<ModelImporter> importers) { - Map<String, ImportedModel> models = new HashMap<>(); - - // Find all subdirectories recursively which contains a model we can read - importRecursively(modelsDirectory, models, importers); - importedModels = Collections.unmodifiableMap(models); - } - - /** - * Returns the model at the given location in the application package. 
- * - * @param modelPath the path to this model (file or directory, depending on model type) - * under the application package, both from the root or relative to the - * models directory works - * @return the model at this path or null if none - */ - public ImportedModel get(File modelPath) { - return importedModels.get(toName(modelPath)); - } - - /** Returns an immutable collection of all the imported models */ - public Collection<ImportedModel> all() { - return importedModels.values(); - } - - private static void importRecursively(File dir, - Map<String, ImportedModel> models, - Collection<ModelImporter> importers) { - if ( ! dir.isDirectory()) return; - - Arrays.stream(dir.listFiles()).sorted().forEach(child -> { - Optional<ModelImporter> importer = findImporterOf(child, importers); - if (importer.isPresent()) { - String name = toName(child); - ImportedModel existing = models.get(name); - if (existing != null) - throw new IllegalArgumentException("The models in " + child + " and " + existing.source() + - " both resolve to the model name '" + name + "'"); - models.put(name, importer.get().importModel(name, child)); - } - else { - importRecursively(child, models, importers); - } - }); - } - - private static Optional<ModelImporter> findImporterOf(File path, Collection<ModelImporter> importers) { - return importers.stream().filter(item -> item.canImport(path.toString())).findFirst(); - } - - private static String toName(File modelFile) { - Path modelPath = Path.fromString(modelFile.toString()); - if (modelFile.isFile()) - modelPath = stripFileEnding(modelPath); - String localPath = concatenateAfterModelsDirectory(modelPath); - return localPath.replace('.', '_'); - } - - private static Path stripFileEnding(Path path) { - int dotIndex = path.last().lastIndexOf("."); - if (dotIndex <= 0) return path; - return path.withLast(path.last().substring(0, dotIndex)); - } - - private static String concatenateAfterModelsDirectory(Path path) { - boolean afterModels = false; - 
StringBuilder result = new StringBuilder(); - for (String element : path.elements()) { - if (afterModels) result.append(element).append("_"); - if (element.equals("models")) afterModels = true; - } - return result.substring(0, result.length()-1); - } - -} diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java index 4039de85e31..294a4782001 100644 --- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java +++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java @@ -1,6 +1,7 @@ // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.rankingexpression.importer.configmodelview; +import com.yahoo.concurrent.InThreadExecutorService; import com.yahoo.path.Path; import java.io.File; @@ -10,6 +11,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; /** * All models imported from the models/ directory in the application package. 
@@ -24,18 +29,35 @@ public class ImportedMlModels { private final Map<String, ImportedMlModel> importedModels; /** Models that were not imported due to some error */ - private final Map<String, String> skippedModels = new HashMap<>(); + private final Map<String, String> skippedModels = new ConcurrentHashMap<>(); /** Create a null imported models */ public ImportedMlModels() { importedModels = Collections.emptyMap(); } + /** Will disappear shortly */ + @Deprecated public ImportedMlModels(File modelsDirectory, Collection<MlModelImporter> importers) { - Map<String, ImportedMlModel> models = new HashMap<>(); + this(modelsDirectory, new InThreadExecutorService(), importers); + } + + public ImportedMlModels(File modelsDirectory, ExecutorService executor, Collection<MlModelImporter> importers) { + Map<String, Future<ImportedMlModel>> futureModels = new HashMap<>(); // Find all subdirectories recursively which contains a model we can read - importRecursively(modelsDirectory, models, importers, skippedModels); + importRecursively(modelsDirectory, executor, futureModels, importers, skippedModels); + Map<String, ImportedMlModel> models = new HashMap<>(); + futureModels.forEach((name, future) -> { + try { + ImportedMlModel model = future.get(); + if (model != null) { + models.put(name, model); + } + } catch (InterruptedException | ExecutionException e) { + skippedModels.put(name, e.getMessage()); + } + }); importedModels = Collections.unmodifiableMap(models); } @@ -61,7 +83,8 @@ public class ImportedMlModels { } private static void importRecursively(File dir, - Map<String, ImportedMlModel> models, + ExecutorService executor, + Map<String, Future<ImportedMlModel>> models, Collection<MlModelImporter> importers, Map<String, String> skippedModels) { if ( ! 
dir.isDirectory()) return; @@ -70,19 +93,26 @@ public class ImportedMlModels { Optional<MlModelImporter> importer = findImporterOf(child, importers); if (importer.isPresent()) { String name = toName(child); - ImportedMlModel existing = models.get(name); - if (existing != null) - throw new IllegalArgumentException("The models in " + child + " and " + existing.source() + - " both resolve to the model name '" + name + "'"); - try { - ImportedMlModel importedModel = importer.get().importModel(name, child); - models.put(name, importedModel); - } catch (RuntimeException e) { - skippedModels.put(name, e.getMessage()); + Future<ImportedMlModel> existing = models.get(name); + if (existing != null) { + try { + throw new IllegalArgumentException("The models in " + child + " and " + existing.get().source() + + " both resolve to the model name '" + name + "'"); + } catch (InterruptedException | ExecutionException e) {} } + + Future<ImportedMlModel> future = executor.submit(() -> { + try { + return importer.get().importModel(name, child); + } catch (RuntimeException e) { + skippedModels.put(name, e.getMessage()); + } + return null; + }); + models.put(name, future); } else { - importRecursively(child, models, importers, skippedModels); + importRecursively(child, executor, models, importers, skippedModels); } }); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java index 061a06f4687..7d52b9d72b0 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java @@ -2,7 +2,6 @@ package com.yahoo.vespa.hosted.node.admin.configserver; import com.yahoo.vespa.flags.FlagRepository; -import com.yahoo.vespa.flags.FlagSource; import 
com.yahoo.vespa.hosted.node.admin.configserver.flags.RealFlagRepository; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.RealNodeRepository; @@ -27,9 +26,9 @@ public class RealConfigServerClients implements ConfigServerClients { /** * @param configServerApi the backend API to use - will be closed at {@link #stop()}. */ - public RealConfigServerClients(ConfigServerApi configServerApi, FlagSource flagSource) { + public RealConfigServerClients(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; - nodeRepository = new RealNodeRepository(configServerApi, flagSource); + nodeRepository = new RealNodeRepository(configServerApi); orchestrator = new OrchestratorImpl(configServerApi); state = new StateImpl(configServerApi); flagRepository = new RealFlagRepository(configServerApi); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java new file mode 100644 index 00000000000..ca374533940 --- /dev/null +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java @@ -0,0 +1,54 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.node.admin.configserver.noderepository; + +import java.time.Instant; +import java.util.Objects; + +/** + * @author freva + */ +public class Event { + private final String agent; + private final String type; + private final Instant at; + + public Event(String agent, String type, Instant at) { + this.agent = Objects.requireNonNull(agent); + this.type = Objects.requireNonNull(type); + this.at = Objects.requireNonNull(at); + } + + public String agent() { + return agent; + } + + public String type() { + return type; + } + + public Instant at() { + return at; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Event event1 = (Event) o; + return agent.equals(event1.agent) && type.equals(event1.type) && at.equals(event1.at); + } + + @Override + public int hashCode() { + return Objects.hash(agent, type, at); + } + + @Override + public String toString() { + return "Event{" + + "agent='" + agent + '\'' + + ", type='" + type + '\'' + + ", at=" + at + + '}'; + } +} diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java index 30bc1ef5ea3..e85d51ef992 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java @@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.node.admin.task.util.file.DiskSize; import java.net.URI; import java.time.Instant; import java.util.EnumSet; +import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.Set; @@ -60,6 +61,7 @@ public class NodeSpec { private final Set<String> additionalIpAddresses; private final NodeReports reports; + private final List<Event> events; private final 
Optional<String> parentHostname; private final Optional<URI> archiveUri; @@ -93,6 +95,7 @@ public class NodeSpec { Set<String> ipAddresses, Set<String> additionalIpAddresses, NodeReports reports, + List<Event> events, Optional<String> parentHostname, Optional<URI> archiveUri, Optional<ApplicationId> exclusiveTo) { @@ -128,9 +131,10 @@ public class NodeSpec { this.currentFirmwareCheck = Objects.requireNonNull(currentFirmwareCheck); this.resources = Objects.requireNonNull(resources); this.realResources = Objects.requireNonNull(realResources); - this.ipAddresses = Objects.requireNonNull(ipAddresses); - this.additionalIpAddresses = Objects.requireNonNull(additionalIpAddresses); + this.ipAddresses = Set.copyOf(ipAddresses); + this.additionalIpAddresses = Set.copyOf(additionalIpAddresses); this.reports = Objects.requireNonNull(reports); + this.events = List.copyOf(events); this.parentHostname = Objects.requireNonNull(parentHostname); this.archiveUri = Objects.requireNonNull(archiveUri); this.exclusiveTo = Objects.requireNonNull(exclusiveTo); @@ -263,6 +267,10 @@ public class NodeSpec { public NodeReports reports() { return reports; } + public List<Event> events() { + return events; + } + public Optional<String> parentHostname() { return parentHostname; } @@ -308,6 +316,7 @@ public class NodeSpec { Objects.equals(ipAddresses, that.ipAddresses) && Objects.equals(additionalIpAddresses, that.additionalIpAddresses) && Objects.equals(reports, that.reports) && + Objects.equals(events, that.events) && Objects.equals(parentHostname, that.parentHostname) && Objects.equals(archiveUri, that.archiveUri) && Objects.equals(exclusiveTo, that.exclusiveTo); @@ -342,6 +351,7 @@ public class NodeSpec { ipAddresses, additionalIpAddresses, reports, + events, parentHostname, archiveUri, exclusiveTo); @@ -376,6 +386,7 @@ public class NodeSpec { + " ipAddresses=" + ipAddresses + " additionalIpAddresses=" + additionalIpAddresses + " reports=" + reports + + " events=" + events + " parentHostname=" 
+ parentHostname + " archiveUri=" + archiveUri + " exclusiveTo=" + exclusiveTo @@ -409,6 +420,7 @@ public class NodeSpec { private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); private NodeReports reports = new NodeReports(); + private List<Event> events = List.of(); private Optional<String> parentHostname = Optional.empty(); private Optional<URI> archiveUri = Optional.empty(); private Optional<ApplicationId> exclusiveTo = Optional.empty(); @@ -428,6 +440,7 @@ public class NodeSpec { currentRebootGeneration(node.currentRebootGeneration); orchestratorStatus(node.orchestratorStatus); reports(new NodeReports(node.reports)); + events(node.events); node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); @@ -600,6 +613,11 @@ public class NodeSpec { return this; } + public Builder events(List<Event> events) { + this.events = events; + return this; + } + public Builder parentHostname(String parentHostname) { this.parentHostname = Optional.of(parentHostname); return this; @@ -714,6 +732,10 @@ public class NodeSpec { return reports; } + public List<Event> events() { + return events; + } + public Optional<String> parentHostname() { return parentHostname; } @@ -730,7 +752,7 @@ public class NodeSpec { wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, resources, realResources, ipAddresses, additionalIpAddresses, - reports, parentHostname, archiveUri, exclusiveTo); + reports, events, parentHostname, archiveUri, exclusiveTo); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java index 8934100a463..abc779d8a9a 100644 --- 
a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java @@ -9,10 +9,6 @@ import com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.host.FlavorOverrides; -import com.yahoo.vespa.flags.BooleanFlag; -import com.yahoo.vespa.flags.FetchVector; -import com.yahoo.vespa.flags.FlagSource; -import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi; import com.yahoo.vespa.hosted.node.admin.configserver.HttpException; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.GetAclResponse; @@ -41,11 +37,9 @@ public class RealNodeRepository implements NodeRepository { private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName()); private final ConfigServerApi configServerApi; - private final BooleanFlag useRealResourcesFlag; - public RealNodeRepository(ConfigServerApi configServerApi, FlagSource flagSource) { + public RealNodeRepository(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; - this.useRealResourcesFlag = Flags.USE_REAL_RESOURCES.bindTo(flagSource); } @Override @@ -65,7 +59,7 @@ public class RealNodeRepository implements NodeRepository { final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class); return nodesForHost.nodes.stream() - .map(this::createNodeSpec) + .map(RealNodeRepository::createNodeSpec) .collect(Collectors.toList()); } @@ -75,7 +69,7 @@ public class RealNodeRepository implements NodeRepository { NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName, NodeRepositoryNode.class); - return Optional.ofNullable(nodeResponse).map(this::createNodeSpec); + return 
Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec); } catch (HttpException.NotFoundException | HttpException.ForbiddenException e) { // Return empty on 403 in addition to 404 as it likely means we're trying to access a node that // has been deleted. When a node is deleted, the parent-child relationship no longer exists and @@ -147,7 +141,7 @@ public class RealNodeRepository implements NodeRepository { throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode); } - private NodeSpec createNodeSpec(NodeRepositoryNode node) { + private static NodeSpec createNodeSpec(NodeRepositoryNode node) { Objects.requireNonNull(node.type, "Unknown node type"); NodeType nodeType = NodeType.valueOf(node.type); @@ -157,9 +151,10 @@ public class RealNodeRepository implements NodeRepository { Optional<NodeMembership> membership = Optional.ofNullable(node.membership) .map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired)); NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of)); - boolean useRealResources = useRealResourcesFlag.with(FetchVector.Dimension.CLUSTER_TYPE, membership.map(m -> m.type().value())) - .with(FetchVector.Dimension.NODE_TYPE, nodeType.name()) - .value(); + List<Event> events = node.history.stream() + .map(event -> new Event(event.agent, event.event, Optional.ofNullable(event.at).map(Instant::ofEpochMilli).orElse(Instant.EPOCH))) + .collect(Collectors.toUnmodifiableList()); + return new NodeSpec( node.hostname, Optional.ofNullable(node.openStackId), @@ -183,10 +178,11 @@ public class RealNodeRepository implements NodeRepository { Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli), Optional.ofNullable(node.modelName), nodeResources(node.resources), - nodeResources(useRealResources ? 
node.realResources : node.resources), + nodeResources(node.realResources), node.ipAddresses, node.additionalIpAddresses, reports, + events, Optional.ofNullable(node.parentHostname), Optional.ofNullable(node.archiveUri).map(URI::create), Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm)); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java index 86caab9bf51..4282c67b4cd 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java @@ -6,6 +6,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.JsonNode; +import java.util.List; import java.util.Map; import java.util.Set; @@ -82,6 +83,8 @@ public class NodeRepositoryNode { public String archiveUri; @JsonProperty("exclusiveTo") public String exclusiveTo; + @JsonProperty("history") + public List<Event> history; @JsonProperty("reports") public Map<String, JsonNode> reports = null; @@ -123,6 +126,7 @@ public class NodeRepositoryNode { ", archiveUri=" + archiveUri + ", reports=" + reports + ", exclusiveTo=" + exclusiveTo + + ", history=" + history + '}'; } @@ -198,4 +202,23 @@ public class NodeRepositoryNode { } } + @JsonIgnoreProperties(ignoreUnknown = true) + @JsonInclude(JsonInclude.Include.NON_NULL) + public static class Event { + @JsonProperty + public String event; + @JsonProperty + public String agent; + @JsonProperty + public Long at; + + @Override + public String toString() { + return "Event{" + + "agent=" + agent + + ", event=" + event + + ", at=" + at + + '}'; + } + } } diff --git 
a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ArtifactProducer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ArtifactProducer.java new file mode 100644 index 00000000000..24f070bbc35 --- /dev/null +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ArtifactProducer.java @@ -0,0 +1,20 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.node.admin.maintenance.servicedump; + +import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext; +import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath; + +import java.io.IOException; + +/** + * Produces service dump artifacts. + * + * @author bjorncs + */ +interface ArtifactProducer { + + String name(); + + void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException; + +} diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/JvmDumpProducer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/JvmDumpProducer.java new file mode 100644 index 00000000000..bc7703ba03a --- /dev/null +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/JvmDumpProducer.java @@ -0,0 +1,43 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.node.admin.maintenance.servicedump; + +import com.yahoo.vespa.hosted.node.admin.container.ContainerOperations; +import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext; +import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath; +import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult; + +import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Creates a dump of JVM based Vespa services using vespa-jvm-dumper + * + * @author bjorncs + */ +class JvmDumpProducer implements ArtifactProducer { + + private static final Logger log = Logger.getLogger(JvmDumpProducer.class.getName()); + + private final ContainerOperations container; + + JvmDumpProducer(ContainerOperations container) { this.container = container; } + + public static String NAME = "jvm-dump"; + + @Override public String name() { return NAME; } + + @Override + public void produceArtifact(NodeAgentContext context, String configId, UnixPath resultDirectoryInNode) throws IOException { + UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper")); + context.log(log, Level.INFO, + "Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + resultDirectoryInNode + "'"); + CommandResult result = container.executeCommandInContainerAsRoot( + context, vespaJvmDumper.toString(), configId, resultDirectoryInNode.toString()); + context.log(log, Level.INFO, + "vespa-jvm-dumper exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput()); + if (result.getExitCode() > 0) { + throw new IOException("Failed to jvm dump: " + result.getOutput()); + } + } +} diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ServiceDumpReport.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ServiceDumpReport.java index 0134254b0c6..6ff4929ada1 100644 --- 
a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ServiceDumpReport.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/ServiceDumpReport.java @@ -9,6 +9,8 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.reports.BaseReport; +import java.util.List; + /** * JSON representation of Vespa service dump report. * @@ -27,6 +29,7 @@ class ServiceDumpReport extends BaseReport { private static final String CONFIG_ID_FIELD = "configId"; private static final String EXPIRE_AT_FIELD = "expireAt"; private static final String ERROR_FIELD = "error"; + private static final String ARTIFACTS_FIELD = "artifacts"; private final Long startedAt; private final Long completedAt; @@ -35,6 +38,7 @@ class ServiceDumpReport extends BaseReport { private final String configId; private final Long expireAt; private final String error; + private final List<String> artifacts; @JsonCreator public ServiceDumpReport(@JsonProperty(CREATED_FIELD) Long createdAt, @@ -44,7 +48,8 @@ class ServiceDumpReport extends BaseReport { @JsonProperty(LOCATION_FIELD) String location, @JsonProperty(CONFIG_ID_FIELD) String configId, @JsonProperty(EXPIRE_AT_FIELD) Long expireAt, - @JsonProperty(ERROR_FIELD) String error) { + @JsonProperty(ERROR_FIELD) String error, + @JsonProperty(ARTIFACTS_FIELD) List<String> artifacts) { super(createdAt, null); this.startedAt = startedAt; this.completedAt = completedAt; @@ -53,6 +58,7 @@ class ServiceDumpReport extends BaseReport { this.configId = configId; this.expireAt = expireAt; this.error = error; + this.artifacts = artifacts; } @JsonGetter(STARTED_AT_FIELD) public Long startedAt() { return startedAt; } @@ -62,6 +68,7 @@ class ServiceDumpReport extends BaseReport { @JsonGetter(CONFIG_ID_FIELD) public String configId() { return configId; } @JsonGetter(EXPIRE_AT_FIELD) public Long 
expireAt() { return expireAt; } @JsonGetter(ERROR_FIELD) public String error() { return error; } + @JsonGetter(ARTIFACTS_FIELD) public List<String> artifacts() { return artifacts; } @JsonIgnore public boolean isCompletedOrFailed() { return !isNullTimestamp(failedAt) || !isNullTimestamp(completedAt); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java index a0232e11d41..f98b47fa604 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java @@ -11,14 +11,15 @@ import com.yahoo.vespa.hosted.node.admin.maintenance.sync.SyncFileInfo; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext; import com.yahoo.vespa.hosted.node.admin.task.util.file.FileFinder; import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath; -import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult; import java.net.URI; -import java.nio.file.Path; import java.time.Clock; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.function.Function; import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -38,6 +39,7 @@ public class VespaServiceDumperImpl implements VespaServiceDumper { private final SyncClient syncClient; private final NodeRepository nodeRepository; private final Clock clock; + private final Map<String, ArtifactProducer> artifactProducers; public VespaServiceDumperImpl(ContainerOperations container, SyncClient syncClient, NodeRepository nodeRepository) { this(container, syncClient, nodeRepository, Clock.systemUTC()); @@ -50,6 +52,9 @@ public 
class VespaServiceDumperImpl implements VespaServiceDumper { this.syncClient = syncClient; this.nodeRepository = nodeRepository; this.clock = clock; + this.artifactProducers = List.of(new JvmDumpProducer(container)) + .stream() + .collect(Collectors.toMap(ArtifactProducer::name, Function.identity())); } @Override @@ -76,11 +81,17 @@ public class VespaServiceDumperImpl implements VespaServiceDumper { handleFailure(context, request, startedAt, "Request already expired"); return; } + List<String> artifactTypes = request.artifacts(); + if (artifactTypes == null || artifactTypes.isEmpty()) { + handleFailure(context, request, startedAt, "No artifacts requested"); + return; + } UnixPath directoryInNode = new UnixPath(context.pathInNodeUnderVespaHome("tmp/vespa-service-dump")); UnixPath directoryOnHost = new UnixPath(context.pathOnHostFromPathInNode(directoryInNode.toPath())); try { context.log(log, Level.INFO, - "Creating dump for " + configId + " requested at " + Instant.ofEpochMilli(request.getCreatedMillisOrNull())); + "Creating service dump for " + configId + " requested at " + + Instant.ofEpochMilli(request.getCreatedMillisOrNull())); storeReport(context, createStartedReport(request, startedAt)); if (directoryOnHost.exists()) { context.log(log, Level.INFO, "Removing existing directory '" + directoryOnHost +"'."); @@ -89,18 +100,23 @@ public class VespaServiceDumperImpl implements VespaServiceDumper { context.log(log, Level.INFO, "Creating '" + directoryOnHost +"'."); directoryOnHost.createDirectory(); directoryOnHost.setPermissions("rwxrwxrwx"); - UnixPath vespaJvmDumper = new UnixPath(context.pathInNodeUnderVespaHome("bin/vespa-jvm-dumper")); - context.log(log, Level.INFO, "Executing '" + vespaJvmDumper + "' with arguments '" + configId + "' and '" + directoryInNode + "'"); - CommandResult result = container.executeCommandInContainerAsRoot( - context, vespaJvmDumper.toString(), configId, directoryInNode.toString()); - context.log(log, Level.INFO, "vespa-jvm-dumper 
exited with code '" + result.getExitCode() + "' and output:\n" + result.getOutput()); - if (result.getExitCode() > 0) { - handleFailure(context, request, startedAt, "Failed to create dump: " + result.getOutput()); - return; - } + List<SyncFileInfo> files = new ArrayList<>(); URI destination = serviceDumpDestination(nodeSpec, createDumpId(request)); + for (String artifactType : artifactTypes) { + ArtifactProducer producer = artifactProducers.get(artifactType); + if (producer == null) { + handleFailure(context, request, startedAt, "No artifact producer exists for '" + artifactType + "'"); + return; + } + context.log(log, "Producing artifact of type '" + artifactType + "'"); + UnixPath producerDirectoryOnHost = directoryOnHost.resolve(artifactType); + producerDirectoryOnHost.createDirectory(); + producerDirectoryOnHost.setPermissions("rwxrwxrwx"); + UnixPath producerDirectoryInNode = directoryInNode.resolve(artifactType); + producer.produceArtifact(context, configId, producerDirectoryInNode); + collectArtifactFilesToUpload(files, producerDirectoryOnHost, destination.resolve(artifactType + '/'), expiry); + } context.log(log, Level.INFO, "Uploading files with destination " + destination + " and expiry " + expiry); - List<SyncFileInfo> files = dumpFiles(directoryOnHost.toPath(), destination, expiry); if (!syncClient.sync(context, files, Integer.MAX_VALUE)) { handleFailure(context, request, startedAt, "Unable to upload all files"); return; @@ -117,10 +133,10 @@ public class VespaServiceDumperImpl implements VespaServiceDumper { } } - private List<SyncFileInfo> dumpFiles(Path directoryOnHost, URI destination, Instant expiry) { - return FileFinder.files(directoryOnHost).stream() + private void collectArtifactFilesToUpload(List<SyncFileInfo> files, UnixPath directoryOnHost, URI destination, Instant expiry) { + FileFinder.files(directoryOnHost.toPath()).stream() .flatMap(file -> SyncFileInfo.forServiceDump(destination, file.path(), expiry).stream()) - 
.collect(Collectors.toList()); + .forEach(files::add); } private static Instant expireAt(Instant startedAt, ServiceDumpReport request) { @@ -150,21 +166,21 @@ public class VespaServiceDumperImpl implements VespaServiceDumper { private static ServiceDumpReport createStartedReport(ServiceDumpReport request, Instant startedAt) { return new ServiceDumpReport( request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, null, null, request.configId(), - request.expireAt(), null); + request.expireAt(), null, request.artifacts()); } private static ServiceDumpReport createSuccessReport( Clock clock, ServiceDumpReport request, Instant startedAt, URI location) { return new ServiceDumpReport( request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), clock.instant().toEpochMilli(), null, - location.toString(), request.configId(), request.expireAt(), null); + location.toString(), request.configId(), request.expireAt(), null, request.artifacts()); } private static ServiceDumpReport createErrorReport( Clock clock, ServiceDumpReport request, Instant startedAt, String message) { return new ServiceDumpReport( request.getCreatedMillisOrNull(), startedAt.toEpochMilli(), null, clock.instant().toEpochMilli(), null, - request.configId(), request.expireAt(), message); + request.configId(), request.expireAt(), message, request.artifacts()); } static String createDumpId(ServiceDumpReport request) { diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java index 8c8f3d88a71..36668158dd6 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java @@ -79,7 +79,7 @@ public class NodeAdminImpl implements NodeAdmin { @Override public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) { Map<String, 
NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream() - .collect(Collectors.toMap(nac -> nac.hostname().value(), Function.identity())); + .collect(Collectors.toMap(NodeAdminImpl::nodeAgentId, Function.identity())); // Stop and remove NodeAgents that should no longer be running diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet()) @@ -222,4 +222,14 @@ public class NodeAdminImpl implements NodeAdmin { NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context); return new NodeAgentWithScheduler(nodeAgent, contextManager); } + + private static String nodeAgentId(NodeAgentContext nac) { + // NodeAgentImpl has some internal state that should not be reused when the same hostname is re-allocated + // to a different application/cluster, solve this by including reservation timestamp in the key. + return nac.hostname().value() + "-" + nac.node().events().stream() + .filter(event -> "reserved".equals(event.type())) + .findFirst() + .map(event -> Long.toString(event.at().toEpochMilli())) + .orElse(""); + } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java index c24b2261f42..37ecc6c4e56 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java @@ -102,6 +102,8 @@ public class NodeAdminStateUpdater { * with respect to: freeze, Orchestrator, and services running. 
*/ public void converge(State wantedState) { + NodeSpec node = nodeRepository.getNode(hostHostname); + boolean hostIsActiveInNR = node.state() == NodeState.active; if (wantedState == RESUMED) { adjustNodeAgentsToRunFromNodeRepository(); } else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { @@ -110,21 +112,18 @@ public class NodeAdminStateUpdater { adjustNodeAgentsToRunFromNodeRepository(); nodeAdmin.setFrozen(false); - NodeState currentNodeState = nodeRepository.getNode(hostHostname).state(); - if (currentNodeState == NodeState.active) orchestrator.resume(hostHostname); + if (hostIsActiveInNR) orchestrator.resume(hostHostname); throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick"); } - if (currentState == wantedState) return; + boolean wantFrozen = wantedState != RESUMED; + if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return; currentState = TRANSITIONING; - boolean wantFrozen = wantedState != RESUMED; - if (!nodeAdmin.setFrozen(wantFrozen)) { + if (!nodeAdmin.setFrozen(wantFrozen)) throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); - } - boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).state() == NodeState.active; switch (wantedState) { case RESUMED: if (hostIsActiveInNR) orchestrator.resume(hostHostname); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java index af88890f4a2..fe06812c608 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java @@ -7,7 +7,6 @@ import com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.host.FlavorOverrides; -import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi; import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApiImpl; import com.yahoo.vespa.hosted.provision.restapi.NodesV2ApiHandler; @@ -80,7 +79,7 @@ public class RealNodeRepositoryTest { private void waitForJdiscContainerToServe(ConfigServerApi configServerApi) throws InterruptedException { Instant start = Instant.now(); - nodeRepositoryApi = new RealNodeRepository(configServerApi, new InMemoryFlagSource()); + nodeRepositoryApi = new RealNodeRepository(configServerApi); while (Instant.now().minusSeconds(120).isBefore(start)) { try { nodeRepositoryApi.getNodes("foobar"); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java index af30d3cbe56..1f3ab416db8 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java 
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java @@ -78,7 +78,7 @@ public class ContainerTester implements AutoCloseable { for (int i = 1; i < 4; i++) ipAddresses.addAddress("host" + i + ".test.yahoo.com", "f000::" + i); NodeSpec hostSpec = NodeSpec.Builder.testSpec(HOST_HOSTNAME.value()).type(NodeType.host).build(); - nodeRepository.updateNodeRepositoryNode(hostSpec); + nodeRepository.updateNodeSpec(hostSpec); Clock clock = Clock.systemUTC(); Metrics metrics = new Metrics(); @@ -122,7 +122,7 @@ public class ContainerTester implements AutoCloseable { ", but that image does not exist in the container engine"); } } - nodeRepository.updateNodeRepositoryNode(new NodeSpec.Builder(nodeSpec) + nodeRepository.updateNodeSpec(new NodeSpec.Builder(nodeSpec) .parentHostname(HOST_HOSTNAME.value()) .build()); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java index 5722de4cf90..0c986929de1 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java @@ -3,15 +3,17 @@ package com.yahoo.vespa.hosted.node.admin.integration; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.AddNode; +import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NoSuchNodeException; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeAttributes; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState; -import java.util.HashMap; import java.util.List; import java.util.Map; 
import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -20,55 +22,60 @@ import java.util.stream.Collectors; * @author dybis */ public class NodeRepoMock implements NodeRepository { - private static final Object monitor = new Object(); - private final Map<String, NodeSpec> nodeRepositoryNodesByHostname = new HashMap<>(); + private final Map<String, NodeSpec> nodeSpecByHostname = new ConcurrentHashMap<>(); + private volatile Map<String, Acl> aclByHostname = Map.of(); @Override public void addNodes(List<AddNode> nodes) { } @Override public List<NodeSpec> getNodes(String baseHostName) { - synchronized (monitor) { - return nodeRepositoryNodesByHostname.values().stream() - .filter(node -> baseHostName.equals(node.parentHostname().orElse(null))) - .collect(Collectors.toList()); - } + return nodeSpecByHostname.values().stream() + .filter(node -> baseHostName.equals(node.parentHostname().orElse(null))) + .collect(Collectors.toList()); } @Override public Optional<NodeSpec> getOptionalNode(String hostName) { - synchronized (monitor) { - return Optional.ofNullable(nodeRepositoryNodesByHostname.get(hostName)); - } + return Optional.ofNullable(nodeSpecByHostname.get(hostName)); } @Override public Map<String, Acl> getAcls(String hostname) { - return Map.of(); + return aclByHostname; } @Override public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) { - synchronized (monitor) { - updateNodeRepositoryNode(new NodeSpec.Builder(getNode(hostName)) - .updateFromNodeAttributes(nodeAttributes) - .build()); - } + updateNodeSpec(new NodeSpec.Builder(getNode(hostName)) + .updateFromNodeAttributes(nodeAttributes) + .build()); } @Override public void setNodeState(String hostName, NodeState nodeState) { - synchronized (monitor) { - updateNodeRepositoryNode(new NodeSpec.Builder(getNode(hostName)) - .state(nodeState) - .build()); - } + updateNodeSpec(new 
NodeSpec.Builder(getNode(hostName)) + .state(nodeState) + .build()); } - public void updateNodeRepositoryNode(NodeSpec nodeSpec) { - synchronized (monitor) { - nodeRepositoryNodesByHostname.put(nodeSpec.hostname(), nodeSpec); - } + public void updateNodeSpec(NodeSpec nodeSpec) { + nodeSpecByHostname.put(nodeSpec.hostname(), nodeSpec); + } + + public void updateNodeSpec(String hostname, Function<NodeSpec.Builder, NodeSpec.Builder> mapper) { + nodeSpecByHostname.compute(hostname, (__, nodeSpec) -> { + if (nodeSpec == null) throw new NoSuchNodeException(hostname); + return mapper.apply(new NodeSpec.Builder(nodeSpec)).build(); + }); + } + + public void resetNodeSpecs() { + nodeSpecByHostname.clear(); + } + + public void setAcl(Map<String, Acl> aclByHostname) { + this.aclByHostname = Map.copyOf(aclByHostname); } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java index 397305eae70..19d7e294367 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java @@ -41,7 +41,7 @@ class VespaServiceDumperImplTest { void creates_valid_dump_id_from_dump_request() { long nowMillis = Instant.now().toEpochMilli(); ServiceDumpReport request = new ServiceDumpReport( - nowMillis, null, null, null, null, "default/container.3", null, null); + nowMillis, null, null, null, null, "default/container.3", null, null, List.of(JvmDumpProducer.NAME)); String dumpId = VespaServiceDumperImpl.createDumpId(request); assertEquals("default-container-3-" + nowMillis, dumpId); } @@ -59,13 +59,13 @@ class VespaServiceDumperImplTest { NodeRepoMock nodeRepository = new NodeRepoMock(); ManualClock clock = new 
ManualClock(Instant.ofEpochMilli(1600001000000L)); ServiceDumpReport request = new ServiceDumpReport( - 1600000000000L, null, null, null, null, "default/container.1", null, null); + 1600000000000L, null, null, null, null, "default/container.1", null, null, List.of(JvmDumpProducer.NAME)); NodeSpec initialSpec = NodeSpec.Builder .testSpec(HOSTNAME, NodeState.active) .report(ServiceDumpReport.REPORT_ID, request.toJsonNode()) .archiveUri(URI.create("s3://uri-1/tenant1/")) .build(); - nodeRepository.updateNodeRepositoryNode(initialSpec); + nodeRepository.updateNodeSpec(initialSpec); // Create dumper and invoke tested method VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock); @@ -78,13 +78,13 @@ class VespaServiceDumperImplTest { String expectedJson = "{\"createdMillis\":1600000000000,\"startedAt\":1600001000000,\"completedAt\":1600001000000," + "\"location\":\"s3://uri-1/tenant1/service-dump/default-container-1-1600000000000/\"," + - "\"configId\":\"default/container.1\"}"; + "\"configId\":\"default/container.1\",\"artifacts\":[\"jvm-dump\"]}"; assertReportEquals(nodeRepository, expectedJson); verify(operations).executeCommandInContainerAsRoot( - context, "/opt/vespa/bin/vespa-jvm-dumper", "default/container.1", "/opt/vespa/tmp/vespa-service-dump"); + context, "/opt/vespa/bin/vespa-jvm-dumper", "default/container.1", "/opt/vespa/tmp/vespa-service-dump/jvm-dump"); List<URI> expectedUris = List.of( - URI.create("s3://uri-1/tenant1/service-dump/default-container-1-1600000000000/heap.bin.zst"), - URI.create("s3://uri-1/tenant1/service-dump/default-container-1-1600000000000/jstack")); + URI.create("s3://uri-1/tenant1/service-dump/default-container-1-1600000000000/jvm-dump/heap.bin.zst"), + URI.create("s3://uri-1/tenant1/service-dump/default-container-1-1600000000000/jvm-dump/jstack")); assertSyncedFiles(context, syncClient, expectedUris); } @@ -113,8 +113,8 @@ class VespaServiceDumperImplTest { 
when(operations.executeCommandInContainerAsRoot(any(), any())) .thenAnswer(invocation -> { // Create dummy files to simulate vespa-jvm-dumper - Files.createFile(tmpDirectory.resolve("vespa-service-dump/heap.bin")); - Files.createFile(tmpDirectory.resolve("vespa-service-dump/jstack")); + Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/heap.bin")); + Files.createFile(tmpDirectory.resolve("vespa-service-dump/" + JvmDumpProducer.NAME + "/jstack")); return new CommandResult(null, 0, "result"); }); return operations; diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java index 8ee3a95744b..e6fa4118542 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java @@ -2,13 +2,15 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; import com.yahoo.config.provision.HostName; +import com.yahoo.config.provision.NodeType; import com.yahoo.test.ManualClock; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl; -import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState; +import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.OrchestratorStatus; import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.Orchestrator; +import com.yahoo.vespa.hosted.node.admin.integration.NodeRepoMock; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextFactory; import org.junit.Test; @@ -16,7 +18,6 @@ import java.time.Duration; import java.util.ArrayList; 
import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -45,7 +46,7 @@ import static org.mockito.Mockito.when; */ public class NodeAdminStateUpdaterTest { private final NodeAgentContextFactory nodeAgentContextFactory = mock(NodeAgentContextFactory.class); - private final NodeRepository nodeRepository = mock(NodeRepository.class); + private final NodeRepoMock nodeRepository = spy(new NodeRepoMock()); private final Orchestrator orchestrator = mock(Orchestrator.class); private final NodeAdmin nodeAdmin = mock(NodeAdmin.class); private final HostName hostHostname = HostName.from("basehost1.test.yahoo.com"); @@ -78,10 +79,17 @@ public class NodeAdminStateUpdaterTest { verify(orchestrator, times(1)).resume(hostHostname.value()); verify(nodeAdmin, times(2)).setFrozen(eq(false)); + // Host is externally suspended in orchestrator, should be resumed by node-admin + setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN); + updater.converge(RESUMED); + verify(orchestrator, times(2)).resume(hostHostname.value()); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); + setHostOrchestratorStatus(hostHostname, OrchestratorStatus.NO_REMARKS); + // Lets try to suspend node admin only when(nodeAdmin.setFrozen(eq(true))).thenReturn(false); assertConvergeError(SUSPENDED_NODE_ADMIN, "NodeAdmin is not yet frozen"); - verify(nodeAdmin, times(2)).setFrozen(eq(false)); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); } { @@ -92,10 +100,24 @@ public class NodeAdminStateUpdaterTest { doThrow(new RuntimeException(exceptionMessage)).doNothing() .when(orchestrator).suspend(eq(hostHostname.value())); assertConvergeError(SUSPENDED_NODE_ADMIN, exceptionMessage); - verify(nodeAdmin, times(2)).setFrozen(eq(false)); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); updater.converge(SUSPENDED_NODE_ADMIN); - verify(nodeAdmin, 
times(2)).setFrozen(eq(false)); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); + verify(orchestrator, times(2)).suspend(hostHostname.value()); + setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN); + + // Already suspended, no changes + updater.converge(SUSPENDED_NODE_ADMIN); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); + verify(orchestrator, times(2)).suspend(hostHostname.value()); + + // Host is externally resumed + setHostOrchestratorStatus(hostHostname, OrchestratorStatus.NO_REMARKS); + updater.converge(SUSPENDED_NODE_ADMIN); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); + verify(orchestrator, times(3)).suspend(hostHostname.value()); + setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN); } { @@ -106,7 +128,7 @@ public class NodeAdminStateUpdaterTest { assertConvergeError(SUSPENDED, exceptionMessage); verify(orchestrator, times(1)).suspend(eq(hostHostname.value()), eq(suspendHostnames)); // Make sure we dont roll back if we fail to stop services - we will try to stop again next tick - verify(nodeAdmin, times(2)).setFrozen(eq(false)); + verify(nodeAdmin, times(3)).setFrozen(eq(false)); // Finally we are successful in transitioning to frozen updater.converge(SUSPENDED); @@ -238,20 +260,22 @@ public class NodeAdminStateUpdaterTest { } private void mockNodeRepo(NodeState hostState, int numberOfNodes) { - List<NodeSpec> containersToRun = IntStream.range(1, numberOfNodes + 1) - .mapToObj(i -> NodeSpec.Builder.testSpec("host" + i + ".yahoo.com").build()) - .collect(Collectors.toList()); + nodeRepository.resetNodeSpecs(); + + IntStream.rangeClosed(1, numberOfNodes) + .mapToObj(i -> NodeSpec.Builder.testSpec("host" + i + ".yahoo.com").parentHostname(hostHostname.value()).build()) + .forEach(nodeRepository::updateNodeSpec); - when(nodeRepository.getNodes(eq(hostHostname.value()))).thenReturn(containersToRun); - when(nodeRepository.getNode(eq(hostHostname.value()))).thenReturn( - 
NodeSpec.Builder.testSpec(hostHostname.value(), hostState).build()); + nodeRepository.updateNodeSpec(NodeSpec.Builder.testSpec(hostHostname.value(), hostState).type(NodeType.host).build()); } private void mockAcl(Acl acl, int... nodeIds) { - Map<String, Acl> aclByHostname = Arrays.stream(nodeIds) + nodeRepository.setAcl(Arrays.stream(nodeIds) .mapToObj(i -> "host" + i + ".yahoo.com") - .collect(Collectors.toMap(Function.identity(), h -> acl)); + .collect(Collectors.toMap(Function.identity(), h -> acl))); + } - when(nodeRepository.getAcls(eq(hostHostname.value()))).thenReturn(aclByHostname); + private void setHostOrchestratorStatus(HostName hostname, OrchestratorStatus orchestratorStatus) { + nodeRepository.updateNodeSpec(hostname.value(), node -> node.orchestratorStatus(orchestratorStatus)); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index 4d67c83a179..968220f8d5e 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java @@ -236,13 +236,15 @@ public class Nodes { * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { + if ( ! 
zone.environment().isProduction() || zone.system().isCd()) + return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); + var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; - } /** diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java index f6db9a45a61..edc83c2c74e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java @@ -89,12 +89,13 @@ public class RebalancerTest { tester.nodeRepository().nodes().deactivate(List.of(cpuSkewedNode), new ApplicationTransaction(new ProvisionLock(cpuApp, () -> {}), tx)); tx.commit(); + assertEquals(1, tester.getNodes(Node.State.dirty).size()); // ... 
if activation fails when trying, we clean up the state tester.deployer().setFailActivate(true); tester.maintain(); assertTrue("Want to retire is reset", tester.getNodes(Node.State.active).stream().noneMatch(node -> node.status().wantToRetire())); - assertEquals("Reserved node was moved to dirty", 1, tester.getNodes(Node.State.dirty).size()); + assertEquals("Reserved node was moved to dirty", 2, tester.getNodes(Node.State.dirty).size()); String reservedHostname = tester.getNodes(Node.State.dirty).first().get().hostname(); tester.nodeRepository().nodes().setReady(reservedHostname, Agent.system, "Cleanup"); tester.nodeRepository().nodes().removeRecursively(reservedHostname); @@ -163,12 +164,12 @@ public class RebalancerTest { static class RebalancerTester { - static ApplicationId cpuApp = makeApplicationId("t1", "a1"); - static ApplicationId memoryApp = makeApplicationId("t2", "a2"); - private static NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1); - private static NodeResources memResources = new NodeResources(4, 9, 10, 0.1); - private TestMetric metric = new TestMetric(); - private ProvisioningTester tester = new ProvisioningTester.Builder() + static final ApplicationId cpuApp = makeApplicationId("t1", "a1"); + static final ApplicationId memoryApp = makeApplicationId("t2", "a2"); + private static final NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1); + private static final NodeResources memResources = new NodeResources(4, 9, 10, 0.1); + private final TestMetric metric = new TestMetric(); + private final ProvisioningTester tester = new ProvisioningTester.Builder() .zone(new Zone(Environment.perf, RegionName.from("us-east"))) .flavorsConfig(flavorsConfig()) .build(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index cd89cea60e3..277884a71c6 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Optional; import java.util.Random; import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -967,6 +968,24 @@ public class ProvisioningTest { newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet())); } + @Test + public void transitions_directly_to_dirty_in_cd() { + ApplicationId application = ProvisioningTester.applicationId(); + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build(); + Capacity capacity = Capacity.from(new ClusterResources(2, 1, defaultResources)); + + BiConsumer<Zone, Node.State> stateAsserter = (zone, state) -> { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build(); + tester.makeReadyHosts(2, defaultResources).activateTenantHosts(); + tester.activate(application, tester.prepare(application, cluster, capacity)); + tester.deactivate(application); + assertEquals(2, tester.getNodes(application, state).size()); + }; + + stateAsserter.accept(new Zone(Environment.prod, RegionName.from("us-east")), Node.State.inactive); + stateAsserter.accept(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east")), Node.State.dirty); + } + private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size, int content1Size, NodeResources flavor, ProvisioningTester tester) { return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42"); diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java 
b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java index 061b3f706a5..7090322f3cf 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java @@ -86,17 +86,11 @@ public class OrchestratorImpl implements Orchestrator { zone, Clock.systemUTC(), new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), - resolveNumProxies(orchestratorConfig, flagSource), + orchestratorConfig.numProxies(), Clock.systemUTC()), orchestratorConfig.serviceMonitorConvergenceLatencySeconds()); } - private static int resolveNumProxies(OrchestratorConfig orchestratorConfig, FlagSource flagSource) { - return Flags.ORCHESTRATE_MISSING_PROXIES.bindTo(flagSource).value() ? - orchestratorConfig.numProxies() : - 0; - } - private OrchestratorImpl(ClusterControllerClientFactory clusterControllerClientFactory, StatusService statusService, ServiceMonitor serviceMonitor, diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt index 2d5eb8dbc4f..c76f35bd9ff 100644 --- a/searchcore/CMakeLists.txt +++ b/searchcore/CMakeLists.txt @@ -20,6 +20,7 @@ vespa_define_module( fileacquirer LIBS + src/vespa/searchcore/bmcluster src/vespa/searchcore/config src/vespa/searchcore/grouping src/vespa/searchcore/proton/attribute diff --git a/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt b/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt index fe83c89d83a..daefef5d413 100644 --- a/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt +++ b/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt @@ -2,39 +2,7 @@ vespa_add_executable(searchcore_vespa_feed_bm_app SOURCES vespa_feed_bm.cpp - bm_cluster_controller.cpp - bm_message_bus.cpp - bm_storage_chain_builder.cpp - bm_storage_link.cpp - bucket_info_queue.cpp - document_api_message_bus_bm_feed_handler.cpp - pending_tracker.cpp - pending_tracker_hash.cpp - spi_bm_feed_handler.cpp - storage_api_chain_bm_feed_handler.cpp - 
storage_api_message_bus_bm_feed_handler.cpp - storage_api_rpc_bm_feed_handler.cpp - storage_reply_error_checker.cpp OUTPUT_NAME vespa-feed-bm DEPENDS - searchcore_server - searchcore_initializer - searchcore_reprocessing - searchcore_index - searchcore_persistenceengine - searchcore_docsummary - searchcore_feedoperation - searchcore_matching - searchcore_attribute - searchcore_documentmetastore - searchcore_bucketdb - searchcore_flushengine - searchcore_pcommon - searchcore_grouping - searchcore_proton_metrics - searchcore_fconfig - storageserver_storageapp - messagebus_messagebus-test - messagebus - searchlib_searchlib_uca + searchcore_bmcluster ) diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp index cd1920d237f..940dcf35449 100644 --- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp +++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp @@ -1,80 +1,24 @@ // Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include "bm_cluster_controller.h" -#include "bm_message_bus.h" -#include "bm_storage_chain_builder.h" -#include "bm_storage_link_context.h" -#include "pending_tracker.h" -#include "spi_bm_feed_handler.h" -#include "storage_api_chain_bm_feed_handler.h" -#include "storage_api_message_bus_bm_feed_handler.h" -#include "storage_api_rpc_bm_feed_handler.h" -#include "document_api_message_bus_bm_feed_handler.h" -#include <tests/proton/common/dummydbowner.h> -#include <vespa/config-attributes.h> -#include <vespa/config-bucketspaces.h> -#include <vespa/config-imported-fields.h> -#include <vespa/config-indexschema.h> -#include <vespa/config-persistence.h> -#include <vespa/config-rank-profiles.h> -#include <vespa/config-slobroks.h> -#include <vespa/config-stor-distribution.h> -#include <vespa/config-stor-filestor.h> -#include <vespa/config-summary.h> -#include <vespa/config-summarymap.h> -#include <vespa/config-upgrading.h> -#include <vespa/config/common/configcontext.h> -#include <vespa/document/datatype/documenttype.h> -#include <vespa/document/fieldset/fieldsetrepo.h> -#include <vespa/document/fieldvalue/intfieldvalue.h> #include <vespa/document/repo/configbuilder.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/document/repo/documenttyperepo.h> -#include <vespa/document/test/make_bucket_space.h> -#include <vespa/document/update/assignvalueupdate.h> -#include <vespa/document/update/documentupdate.h> #include <vespa/fastos/app.h> -#include <vespa/messagebus/config-messagebus.h> -#include <vespa/messagebus/testlib/slobrok.h> -#include <vespa/metrics/config-metricsmanager.h> -#include <vespa/searchcommon/common/schemaconfigurer.h> -#include <vespa/searchcore/proton/common/alloc_config.h> -#include <vespa/searchcore/proton/common/hw_info.h> -#include <vespa/searchcore/proton/matching/querylimiter.h> -#include <vespa/searchcore/proton/metrics/metricswireservice.h> -#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h> 
-#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h> -#include <vespa/searchcore/proton/server/bootstrapconfig.h> -#include <vespa/searchcore/proton/server/document_db_maintenance_config.h> -#include <vespa/searchcore/proton/server/documentdb.h> -#include <vespa/searchcore/proton/server/documentdbconfigmanager.h> -#include <vespa/searchcore/proton/server/fileconfigmanager.h> -#include <vespa/searchcore/proton/server/memoryconfigstore.h> -#include <vespa/searchcore/proton/server/persistencehandlerproxy.h> -#include <vespa/searchcore/proton/server/threading_service_config.h> -#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> +#include <vespa/searchcore/bmcluster/bm_cluster.h> +#include <vespa/searchcore/bmcluster/bm_cluster_controller.h> +#include <vespa/searchcore/bmcluster/bm_cluster_params.h> +#include <vespa/searchcore/bmcluster/bm_feed.h> +#include <vespa/searchcore/bmcluster/bm_node.h> +#include <vespa/searchcore/bmcluster/bm_range.h> +#include <vespa/searchcore/bmcluster/bucket_selector.h> +#include <vespa/searchcore/bmcluster/spi_bm_feed_handler.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> -#include <vespa/searchlib/transactionlog/translogserver.h> -#include <vespa/searchsummary/config/config-juniperrc.h> -#include <vespa/slobrok/sbmirror.h> -#include <vespa/storage/bucketdb/config-stor-bucket-init.h> -#include <vespa/storage/common/i_storage_chain_builder.h> -#include <vespa/storage/config/config-stor-bouncer.h> -#include <vespa/storage/config/config-stor-communicationmanager.h> -#include <vespa/storage/config/config-stor-distributormanager.h> -#include <vespa/storage/config/config-stor-opslogger.h> -#include <vespa/storage/config/config-stor-prioritymapping.h> -#include <vespa/storage/config/config-stor-server.h> -#include <vespa/storage/config/config-stor-status.h> -#include <vespa/storage/config/config-stor-visitordispatcher.h> -#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h> -#include 
<vespa/storage/visiting/config-stor-visitor.h> -#include <vespa/storageserver/app/distributorprocess.h> -#include <vespa/storageserver/app/servicelayerprocess.h> #include <vespa/vespalib/io/fileutil.h> +#include <vespa/vespalib/objects/nbostream.h> #include <vespa/vespalib/testkit/testapp.h> #include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/size_literals.h> +#include <vespa/vespalib/util/threadstackexecutor.h> #include <getopt.h> #include <iostream> #include <thread> @@ -82,82 +26,31 @@ #include <vespa/log/log.h> LOG_SETUP("vespa-feed-bm"); -using namespace cloud::config::filedistribution; -using namespace config; using namespace proton; using namespace std::chrono_literals; -using namespace vespa::config::search::core; -using namespace vespa::config::search::summary; -using namespace vespa::config::search; -using vespa::config::content::PersistenceConfigBuilder; -using vespa::config::content::StorDistributionConfigBuilder; -using vespa::config::content::StorFilestorConfigBuilder; -using vespa::config::content::UpgradingConfigBuilder; -using vespa::config::content::core::BucketspacesConfig; -using vespa::config::content::core::BucketspacesConfigBuilder; -using vespa::config::content::core::StorBouncerConfigBuilder; -using vespa::config::content::core::StorBucketInitConfigBuilder; -using vespa::config::content::core::StorCommunicationmanagerConfigBuilder; -using vespa::config::content::core::StorDistributormanagerConfigBuilder; -using vespa::config::content::core::StorOpsloggerConfigBuilder; -using vespa::config::content::core::StorPrioritymappingConfigBuilder; -using vespa::config::content::core::StorServerConfigBuilder; -using vespa::config::content::core::StorStatusConfigBuilder; -using vespa::config::content::core::StorVisitorConfigBuilder; -using vespa::config::content::core::StorVisitordispatcherConfigBuilder; -using cloud::config::SlobroksConfigBuilder; -using messagebus::MessagebusConfigBuilder; -using 
metrics::MetricsmanagerConfigBuilder; -using config::ConfigContext; -using config::ConfigSet; -using config::ConfigUri; -using config::IConfigContext; -using document::AssignValueUpdate; -using document::BucketId; -using document::BucketSpace; -using document::Document; -using document::DocumentId; -using document::DocumentType; using document::DocumentTypeRepo; using document::DocumentTypeRepoFactory; -using document::DocumentUpdate; using document::DocumenttypesConfig; using document::DocumenttypesConfigBuilder; -using document::Field; -using document::FieldSetRepo; -using document::FieldUpdate; -using document::IntFieldValue; -using document::test::makeBucketSpace; -using feedbm::BmClusterController; -using feedbm::BmMessageBus; -using feedbm::BmStorageChainBuilder; -using feedbm::BmStorageLinkContext; -using feedbm::IBmFeedHandler; -using feedbm::DocumentApiMessageBusBmFeedHandler; -using feedbm::SpiBmFeedHandler; -using feedbm::StorageApiChainBmFeedHandler; -using feedbm::StorageApiMessageBusBmFeedHandler; -using feedbm::StorageApiRpcBmFeedHandler; -using search::TuneFileDocumentDB; +using search::bmcluster::BmClusterController; +using search::bmcluster::IBmFeedHandler; +using search::bmcluster::BmClusterParams; +using search::bmcluster::BmCluster; +using search::bmcluster::BmFeed; +using search::bmcluster::BmNode; +using search::bmcluster::BmRange; +using search::bmcluster::BucketSelector; using search::index::DummyFileHeaderContext; -using search::index::Schema; -using search::index::SchemaBuilder; -using search::transactionlog::TransLogServer; -using storage::rpc::SharedRpcResources; -using storage::rpc::StorageApiRpcService; using storage::spi::PersistenceProvider; -using vespalib::compression::CompressionConfig; using vespalib::makeLambdaTask; -using proton::ThreadingServiceConfig; - -using DocumentDBMap = std::map<DocTypeName, std::shared_ptr<DocumentDB>>; namespace { vespalib::string base_dir = "testdb"; +constexpr int base_port = 9017; 
-std::shared_ptr<DocumenttypesConfig> make_document_type() { +std::shared_ptr<DocumenttypesConfig> make_document_types() { using Struct = document::config_builder::Struct; using DataType = document::DataType; document::config_builder::DocumenttypesConfigBuilderHelper builder; @@ -165,130 +58,14 @@ std::shared_ptr<DocumenttypesConfig> make_document_type() { return std::make_shared<DocumenttypesConfig>(builder.config()); } -std::shared_ptr<AttributesConfig> make_attributes_config() { - AttributesConfigBuilder builder; - AttributesConfig::Attribute attribute; - attribute.name = "int"; - attribute.datatype = AttributesConfig::Attribute::Datatype::INT32; - builder.attribute.emplace_back(attribute); - return std::make_shared<AttributesConfig>(builder); -} - -std::shared_ptr<DocumentDBConfig> make_document_db_config(std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const DocumentTypeRepo> repo, const DocTypeName& doc_type_name) -{ - auto indexschema = std::make_shared<IndexschemaConfig>(); - auto attributes = make_attributes_config(); - auto summary = std::make_shared<SummaryConfig>(); - std::shared_ptr<Schema> schema(new Schema()); - SchemaBuilder::build(*indexschema, *schema); - SchemaBuilder::build(*attributes, *schema); - SchemaBuilder::build(*summary, *schema); - return std::make_shared<DocumentDBConfig>( - 1, - std::make_shared<RankProfilesConfig>(), - std::make_shared<matching::RankingConstants>(), - std::make_shared<matching::RankingExpressions>(), - std::make_shared<matching::OnnxModels>(), - indexschema, - attributes, - summary, - std::make_shared<SummarymapConfig>(), - std::make_shared<JuniperrcConfig>(), - document_types, - repo, - std::make_shared<ImportedFieldsConfig>(), - std::make_shared<TuneFileDocumentDB>(), - schema, - std::make_shared<DocumentDBMaintenanceConfig>(), - search::LogDocumentStore::Config(), - std::make_shared<const ThreadingServiceConfig>(ThreadingServiceConfig::make(1)), - std::make_shared<const AllocConfig>(), - 
"client", - doc_type_name.getName()); -} - -void -make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port) -{ - SlobroksConfigBuilder::Slobrok slobrok; - slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port); - slobroks.slobrok.push_back(std::move(slobrok)); -} - -void -make_bucketspaces_config(BucketspacesConfigBuilder &bucketspaces) -{ - BucketspacesConfigBuilder::Documenttype bucket_space_map; - bucket_space_map.name = "test"; - bucket_space_map.bucketspace = "default"; - bucketspaces.documenttype.emplace_back(std::move(bucket_space_map)); -} - -class MyPersistenceEngineOwner : public IPersistenceEngineOwner -{ - void setClusterState(BucketSpace, const storage::spi::ClusterState &) override { } -}; - -struct MyResourceWriteFilter : public IResourceWriteFilter -{ - bool acceptWriteOperation() const override { return true; } - State getAcceptState() const override { return IResourceWriteFilter::State(); } -}; - -class BucketSelector -{ - uint32_t _thread_id; - uint32_t _threads; - uint32_t _num_buckets; -public: - BucketSelector(uint32_t thread_id_in, uint32_t threads_in, uint32_t num_buckets_in) - : _thread_id(thread_id_in), - _threads(threads_in), - _num_buckets((num_buckets_in / _threads) * _threads) - { - } - uint64_t operator()(uint32_t i) const { - return (static_cast<uint64_t>(i) * _threads + _thread_id) % _num_buckets; - } -}; - -class BMRange -{ - uint32_t _start; - uint32_t _end; -public: - BMRange(uint32_t start_in, uint32_t end_in) - : _start(start_in), - _end(end_in) - { - } - uint32_t get_start() const { return _start; } - uint32_t get_end() const { return _end; } -}; - -class BMParams { +class BMParams : public BmClusterParams { uint32_t _documents; uint32_t _client_threads; uint32_t _get_passes; - vespalib::string _indexing_sequencer; uint32_t _put_passes; uint32_t _update_passes; uint32_t _remove_passes; - uint32_t _rpc_network_threads; - uint32_t _rpc_events_before_wakeup; - uint32_t 
_rpc_targets_per_node; - uint32_t _response_threads; uint32_t _max_pending; - bool _enable_distributor; - bool _enable_service_layer; - bool _skip_get_spi_bucket_info; - bool _use_document_api; - bool _use_message_bus; - bool _use_storage_chain; - bool _use_async_message_handling_on_schedule; - uint32_t _bucket_db_stripe_bits; - uint32_t _distributor_stripes; - bool _skip_communicationmanager_thread; uint32_t get_start(uint32_t thread_id) const { return (_documents / _client_threads) * thread_id + std::min(thread_id, _documents % _client_threads); } @@ -297,82 +74,38 @@ public: : _documents(160000), _client_threads(1), _get_passes(0), - _indexing_sequencer(), _put_passes(2), _update_passes(1), _remove_passes(2), - _rpc_network_threads(1), // Same default as previous in stor-communicationmanager.def - _rpc_events_before_wakeup(1), // Same default as in stor-communicationmanager.def - _rpc_targets_per_node(1), // Same default as in stor-communicationmanager.def - _response_threads(2), // Same default as in stor-filestor.def - _max_pending(1000), - _enable_distributor(false), - _enable_service_layer(false), - _skip_get_spi_bucket_info(false), - _use_document_api(false), - _use_message_bus(false), - _use_storage_chain(false), - _use_async_message_handling_on_schedule(false), - _bucket_db_stripe_bits(0), - _distributor_stripes(0), - _skip_communicationmanager_thread(false) // Same default as in stor-communicationmanager.def + _max_pending(1000) { } - BMRange get_range(uint32_t thread_id) const { - return BMRange(get_start(thread_id), get_start(thread_id + 1)); + BmRange get_range(uint32_t thread_id) const { + return BmRange(get_start(thread_id), get_start(thread_id + 1)); } uint32_t get_documents() const { return _documents; } uint32_t get_max_pending() const { return _max_pending; } uint32_t get_client_threads() const { return _client_threads; } uint32_t get_get_passes() const { return _get_passes; } - const vespalib::string & get_indexing_sequencer() const { return 
_indexing_sequencer; } uint32_t get_put_passes() const { return _put_passes; } uint32_t get_update_passes() const { return _update_passes; } uint32_t get_remove_passes() const { return _remove_passes; } - uint32_t get_rpc_network_threads() const { return _rpc_network_threads; } - uint32_t get_rpc_events_before_wakup() const { return _rpc_events_before_wakeup; } - uint32_t get_rpc_targets_per_node() const { return _rpc_targets_per_node; } - uint32_t get_response_threads() const { return _response_threads; } - bool get_enable_distributor() const { return _enable_distributor; } - bool get_skip_get_spi_bucket_info() const { return _skip_get_spi_bucket_info; } - bool get_use_document_api() const { return _use_document_api; } - bool get_use_message_bus() const { return _use_message_bus; } - bool get_use_storage_chain() const { return _use_storage_chain; } - bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; } - uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; } - uint32_t get_distributor_stripes() const { return _distributor_stripes; } - bool get_skip_communicationmanager_thread() const { return _skip_communicationmanager_thread; } void set_documents(uint32_t documents_in) { _documents = documents_in; } void set_max_pending(uint32_t max_pending_in) { _max_pending = max_pending_in; } void set_client_threads(uint32_t threads_in) { _client_threads = threads_in; } void set_get_passes(uint32_t get_passes_in) { _get_passes = get_passes_in; } - void set_indexing_sequencer(vespalib::stringref sequencer) { _indexing_sequencer = sequencer; } void set_put_passes(uint32_t put_passes_in) { _put_passes = put_passes_in; } void set_update_passes(uint32_t update_passes_in) { _update_passes = update_passes_in; } void set_remove_passes(uint32_t remove_passes_in) { _remove_passes = remove_passes_in; } - void set_rpc_network_threads(uint32_t threads_in) { _rpc_network_threads = threads_in; } - void 
set_rpc_events_before_wakeup(uint32_t value) { _rpc_events_before_wakeup = value; } - void set_rpc_targets_per_node(uint32_t targets_in) { _rpc_targets_per_node = targets_in; } - void set_response_threads(uint32_t threads_in) { _response_threads = threads_in; } - void set_enable_distributor(bool value) { _enable_distributor = value; } - void set_enable_service_layer(bool value) { _enable_service_layer = value; } - void set_skip_get_spi_bucket_info(bool value) { _skip_get_spi_bucket_info = value; } - void set_use_document_api(bool value) { _use_document_api = value; } - void set_use_message_bus(bool value) { _use_message_bus = value; } - void set_use_storage_chain(bool value) { _use_storage_chain = value; } - void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; } - void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; } - void set_distributor_stripes(uint32_t value) { _distributor_stripes = value; } - void set_skip_communicationmanager_thread(bool value) { _skip_communicationmanager_thread = value; } bool check() const; - bool needs_service_layer() const { return _enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; } - bool needs_distributor() const { return _enable_distributor || _use_document_api; } - bool needs_message_bus() const { return _use_message_bus || _use_document_api; } }; bool BMParams::check() const { + if (!BmClusterParams::check()) { + return false; + } if (_client_threads < 1) { std::cerr << "Too few client threads: " << _client_threads << std::endl; return false; @@ -389,637 +122,37 @@ BMParams::check() const std::cerr << "Put passes too low: " << _put_passes << std::endl; return false; } - if (_rpc_network_threads < 1) { - std::cerr << "Too few rpc network threads: " << _rpc_network_threads << std::endl; - return false; - } - if (_rpc_targets_per_node < 1) { - std::cerr << "Too few rpc targets per node: " << 
_rpc_targets_per_node << std::endl; - return false; - } - if (_response_threads < 1) { - std::cerr << "Too few response threads: " << _response_threads << std::endl; - return false; - } return true; } -class MyServiceLayerProcess : public storage::ServiceLayerProcess { - PersistenceProvider& _provider; - -public: - MyServiceLayerProcess(const config::ConfigUri & configUri, - PersistenceProvider &provider, - std::unique_ptr<storage::IStorageChainBuilder> chain_builder); - ~MyServiceLayerProcess() override { shutdown(); } - - void shutdown() override; - void setupProvider() override; - PersistenceProvider& getProvider() override; -}; - -MyServiceLayerProcess::MyServiceLayerProcess(const config::ConfigUri & configUri, - PersistenceProvider &provider, - std::unique_ptr<storage::IStorageChainBuilder> chain_builder) - : ServiceLayerProcess(configUri), - _provider(provider) -{ - if (chain_builder) { - set_storage_chain_builder(std::move(chain_builder)); - } -} - -void -MyServiceLayerProcess::shutdown() -{ - ServiceLayerProcess::shutdown(); -} - -void -MyServiceLayerProcess::setupProvider() -{ -} - -PersistenceProvider& -MyServiceLayerProcess::getProvider() -{ - return _provider; -} - -struct MyStorageConfig -{ - vespalib::string config_id; - DocumenttypesConfigBuilder documenttypes; - StorDistributionConfigBuilder stor_distribution; - StorBouncerConfigBuilder stor_bouncer; - StorCommunicationmanagerConfigBuilder stor_communicationmanager; - StorOpsloggerConfigBuilder stor_opslogger; - StorPrioritymappingConfigBuilder stor_prioritymapping; - UpgradingConfigBuilder upgrading; - StorServerConfigBuilder stor_server; - StorStatusConfigBuilder stor_status; - BucketspacesConfigBuilder bucketspaces; - MetricsmanagerConfigBuilder metricsmanager; - SlobroksConfigBuilder slobroks; - MessagebusConfigBuilder messagebus; - - MyStorageConfig(bool distributor, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, - int slobrok_port, int mbus_port, int 
rpc_port, int status_port, const BMParams& params) - : config_id(config_id_in), - documenttypes(documenttypes_in), - stor_distribution(), - stor_bouncer(), - stor_communicationmanager(), - stor_opslogger(), - stor_prioritymapping(), - upgrading(), - stor_server(), - stor_status(), - bucketspaces(), - metricsmanager(), - slobroks(), - messagebus() - { - { - auto &dc = stor_distribution; - { - StorDistributionConfigBuilder::Group group; - { - StorDistributionConfigBuilder::Group::Nodes node; - node.index = 0; - group.nodes.push_back(std::move(node)); - } - group.index = "invalid"; - group.name = "invalid"; - group.capacity = 1.0; - group.partitions = ""; - dc.group.push_back(std::move(group)); - } - dc.redundancy = 1; - dc.readyCopies = 1; - } - stor_server.isDistributor = distributor; - stor_server.contentNodeBucketDbStripeBits = params.get_bucket_db_stripe_bits(); - if (distributor) { - stor_server.rootFolder = "distributor"; - } else { - stor_server.rootFolder = "storage"; - } - make_slobroks_config(slobroks, slobrok_port); - stor_communicationmanager.rpc.numNetworkThreads = params.get_rpc_network_threads(); - stor_communicationmanager.rpc.eventsBeforeWakeup = params.get_rpc_events_before_wakup(); - stor_communicationmanager.rpc.numTargetsPerNode = params.get_rpc_targets_per_node(); - stor_communicationmanager.mbusport = mbus_port; - stor_communicationmanager.rpcport = rpc_port; - stor_communicationmanager.skipThread = params.get_skip_communicationmanager_thread(); - - stor_status.httpport = status_port; - make_bucketspaces_config(bucketspaces); - } - - ~MyStorageConfig(); - - void add_builders(ConfigSet &set) { - set.addBuilder(config_id, &documenttypes); - set.addBuilder(config_id, &stor_distribution); - set.addBuilder(config_id, &stor_bouncer); - set.addBuilder(config_id, &stor_communicationmanager); - set.addBuilder(config_id, &stor_opslogger); - set.addBuilder(config_id, &stor_prioritymapping); - set.addBuilder(config_id, &upgrading); - 
set.addBuilder(config_id, &stor_server); - set.addBuilder(config_id, &stor_status); - set.addBuilder(config_id, &bucketspaces); - set.addBuilder(config_id, &metricsmanager); - set.addBuilder(config_id, &slobroks); - set.addBuilder(config_id, &messagebus); - } -}; - -MyStorageConfig::~MyStorageConfig() = default; - -struct MyServiceLayerConfig : public MyStorageConfig -{ - PersistenceConfigBuilder persistence; - StorFilestorConfigBuilder stor_filestor; - StorBucketInitConfigBuilder stor_bucket_init; - StorVisitorConfigBuilder stor_visitor; - - MyServiceLayerConfig(const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, - int slobrok_port, int mbus_port, int rpc_port, int status_port, const BMParams& params) - : MyStorageConfig(false, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params), - persistence(), - stor_filestor(), - stor_bucket_init(), - stor_visitor() - { - stor_filestor.numResponseThreads = params.get_response_threads(); - stor_filestor.numNetworkThreads = params.get_rpc_network_threads(); - stor_filestor.useAsyncMessageHandlingOnSchedule = params.get_use_async_message_handling_on_schedule(); - } - - ~MyServiceLayerConfig(); - - void add_builders(ConfigSet &set) { - MyStorageConfig::add_builders(set); - set.addBuilder(config_id, &persistence); - set.addBuilder(config_id, &stor_filestor); - set.addBuilder(config_id, &stor_bucket_init); - set.addBuilder(config_id, &stor_visitor); - } -}; - -MyServiceLayerConfig::~MyServiceLayerConfig() = default; - -struct MyDistributorConfig : public MyStorageConfig -{ - StorDistributormanagerConfigBuilder stor_distributormanager; - StorVisitordispatcherConfigBuilder stor_visitordispatcher; - - MyDistributorConfig(const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, - int slobrok_port, int mbus_port, int rpc_port, int status_port, const BMParams& params) - : MyStorageConfig(true, config_id_in, documenttypes_in, slobrok_port, 
mbus_port, rpc_port, status_port, params), - stor_distributormanager(), - stor_visitordispatcher() - { - stor_distributormanager.numDistributorStripes = params.get_distributor_stripes(); - } - - ~MyDistributorConfig(); - - void add_builders(ConfigSet &set) { - MyStorageConfig::add_builders(set); - set.addBuilder(config_id, &stor_distributormanager); - set.addBuilder(config_id, &stor_visitordispatcher); - } -}; - -MyDistributorConfig::~MyDistributorConfig() = default; - -struct MyRpcClientConfig { - vespalib::string config_id; - SlobroksConfigBuilder slobroks; - - MyRpcClientConfig(const vespalib::string &config_id_in, int slobrok_port) - : config_id(config_id_in), - slobroks() - { - make_slobroks_config(slobroks, slobrok_port); - } - ~MyRpcClientConfig(); - - void add_builders(ConfigSet &set) { - set.addBuilder(config_id, &slobroks); - } -}; - -MyRpcClientConfig::~MyRpcClientConfig() = default; - -struct MyMessageBusConfig { - vespalib::string config_id; - SlobroksConfigBuilder slobroks; - MessagebusConfigBuilder messagebus; - - MyMessageBusConfig(const vespalib::string &config_id_in, int slobrok_port) - : config_id(config_id_in), - slobroks(), - messagebus() - { - make_slobroks_config(slobroks, slobrok_port); - } - ~MyMessageBusConfig(); - - void add_builders(ConfigSet &set) { - set.addBuilder(config_id, &slobroks); - set.addBuilder(config_id, &messagebus); - } -}; - -MyMessageBusConfig::~MyMessageBusConfig() = default; - } struct PersistenceProviderFixture { - std::shared_ptr<DocumenttypesConfig> _document_types; + std::shared_ptr<const DocumenttypesConfig> _document_types; std::shared_ptr<const DocumentTypeRepo> _repo; - DocTypeName _doc_type_name; - const DocumentType* _document_type; - const Field& _field; - std::shared_ptr<DocumentDBConfig> _document_db_config; - vespalib::string _base_dir; - DummyFileHeaderContext _file_header_context; - int _tls_listen_port; - int _slobrok_port; - int _rpc_client_port; - int _service_layer_mbus_port; - int 
_service_layer_rpc_port; - int _service_layer_status_port; - int _distributor_mbus_port; - int _distributor_rpc_port; - int _distributor_status_port; - TransLogServer _tls; - vespalib::string _tls_spec; - matching::QueryLimiter _query_limiter; - vespalib::Clock _clock; - DummyWireService _metrics_wire_service; - MemoryConfigStores _config_stores; - vespalib::ThreadStackExecutor _summary_executor; - DummyDBOwner _document_db_owner; - BucketSpace _bucket_space; - std::shared_ptr<DocumentDB> _document_db; - MyPersistenceEngineOwner _persistence_owner; - MyResourceWriteFilter _write_filter; - test::DiskMemUsageNotifier _disk_mem_usage_notifier; - std::shared_ptr<PersistenceEngine> _persistence_engine; - std::unique_ptr<const FieldSetRepo> _field_set_repo; - uint32_t _bucket_bits; - MyServiceLayerConfig _service_layer_config; - MyDistributorConfig _distributor_config; - MyRpcClientConfig _rpc_client_config; - MyMessageBusConfig _message_bus_config; - ConfigSet _config_set; - std::shared_ptr<IConfigContext> _config_context; - std::unique_ptr<IBmFeedHandler> _feed_handler; - std::unique_ptr<mbus::Slobrok> _slobrok; - std::shared_ptr<BmStorageLinkContext> _service_layer_chain_context; - std::unique_ptr<MyServiceLayerProcess> _service_layer; - std::unique_ptr<SharedRpcResources> _rpc_client_shared_rpc_resources; - std::shared_ptr<BmStorageLinkContext> _distributor_chain_context; - std::unique_ptr<storage::DistributorProcess> _distributor; - std::unique_ptr<BmMessageBus> _message_bus; + std::unique_ptr<BmCluster> _bm_cluster; + BmFeed _feed; + IBmFeedHandler* _feed_handler; explicit PersistenceProviderFixture(const BMParams& params); ~PersistenceProviderFixture(); - void create_document_db(const BMParams & params); - uint32_t num_buckets() const { return (1u << _bucket_bits); } - BucketId make_bucket_id(uint32_t n) const { return BucketId(_bucket_bits, n & (num_buckets() - 1)); } - document::Bucket make_bucket(uint32_t n) const { return document::Bucket(_bucket_space, 
make_bucket_id(n)); } - DocumentId make_document_id(uint32_t n, uint32_t i) const; - std::unique_ptr<Document> make_document(uint32_t n, uint32_t i) const; - std::unique_ptr<DocumentUpdate> make_document_update(uint32_t n, uint32_t i) const; - void create_buckets(); - void wait_slobrok(const vespalib::string &name); - void start_service_layer(const BMParams& params); - void start_distributor(const BMParams& params); - void start_message_bus(); - void create_feed_handler(const BMParams& params); - void shutdown_feed_handler(); - void shutdown_message_bus(); - void shutdown_distributor(); - void shutdown_service_layer(); }; PersistenceProviderFixture::PersistenceProviderFixture(const BMParams& params) - : _document_types(make_document_type()), - _repo(DocumentTypeRepoFactory::make(*_document_types)), - _doc_type_name("test"), - _document_type(_repo->getDocumentType(_doc_type_name.getName())), - _field(_document_type->getField("int")), - _document_db_config(make_document_db_config(_document_types, _repo, _doc_type_name)), - _base_dir(base_dir), - _file_header_context(), - _tls_listen_port(9017), - _slobrok_port(9018), - _rpc_client_port(9019), - _service_layer_mbus_port(9020), - _service_layer_rpc_port(9021), - _service_layer_status_port(9022), - _distributor_mbus_port(9023), - _distributor_rpc_port(9024), - _distributor_status_port(9025), - _tls("tls", _tls_listen_port, _base_dir, _file_header_context), - _tls_spec(vespalib::make_string("tcp/localhost:%d", _tls_listen_port)), - _query_limiter(), - _clock(), - _metrics_wire_service(), - _config_stores(), - _summary_executor(8, 128_Ki), - _document_db_owner(), - _bucket_space(makeBucketSpace(_doc_type_name.getName())), - _document_db(), - _persistence_owner(), - _write_filter(), - _disk_mem_usage_notifier(), - _persistence_engine(), - _field_set_repo(std::make_unique<const FieldSetRepo>(*_repo)), - _bucket_bits(16), - _service_layer_config("bm-servicelayer", *_document_types, _slobrok_port, _service_layer_mbus_port, 
_service_layer_rpc_port, _service_layer_status_port, params), - _distributor_config("bm-distributor", *_document_types, _slobrok_port, _distributor_mbus_port, _distributor_rpc_port, _distributor_status_port, params), - _rpc_client_config("bm-rpc-client", _slobrok_port), - _message_bus_config("bm-message-bus", _slobrok_port), - _config_set(), - _config_context(std::make_shared<ConfigContext>(_config_set)), - _feed_handler(), - _slobrok(), - _service_layer_chain_context(), - _service_layer(), - _rpc_client_shared_rpc_resources(), - _distributor_chain_context(), - _distributor(), - _message_bus() + : _document_types(make_document_types()), + _repo(document::DocumentTypeRepoFactory::make(*_document_types)), + _bm_cluster(std::make_unique<BmCluster>(base_dir, base_port, params, _document_types, _repo)), + _feed(_repo), + _feed_handler(nullptr) { - create_document_db(params); - _persistence_engine = std::make_unique<PersistenceEngine>(_persistence_owner, _write_filter, _disk_mem_usage_notifier, -1, false); - auto proxy = std::make_shared<PersistenceHandlerProxy>(_document_db); - _persistence_engine->putHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name, proxy); - _service_layer_config.add_builders(_config_set); - _distributor_config.add_builders(_config_set); - _rpc_client_config.add_builders(_config_set); - _message_bus_config.add_builders(_config_set); - _feed_handler = std::make_unique<SpiBmFeedHandler>(*_persistence_engine, *_field_set_repo, params.get_skip_get_spi_bucket_info()); + _bm_cluster->make_nodes(); } -PersistenceProviderFixture::~PersistenceProviderFixture() -{ - if (_persistence_engine) { - _persistence_engine->destroyIterators(); - _persistence_engine->removeHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name); - } - if (_document_db) { - _document_db->close(); - } -} - -void -PersistenceProviderFixture::create_document_db(const BMParams & params) -{ - vespalib::mkdir(_base_dir, false); - vespalib::mkdir(_base_dir + 
"/" + _doc_type_name.getName(), false); - vespalib::string input_cfg = _base_dir + "/" + _doc_type_name.getName() + "/baseconfig"; - { - FileConfigManager fileCfg(input_cfg, "", _doc_type_name.getName()); - fileCfg.saveConfig(*_document_db_config, 1); - } - config::DirSpec spec(input_cfg + "/config-1"); - auto tuneFileDocDB = std::make_shared<TuneFileDocumentDB>(); - DocumentDBConfigHelper mgr(spec, _doc_type_name.getName()); - auto protonCfg = std::make_shared<ProtonConfigBuilder>(); - if ( ! params.get_indexing_sequencer().empty()) { - vespalib::string sequencer = params.get_indexing_sequencer(); - std::transform(sequencer.begin(), sequencer.end(), sequencer.begin(), [](unsigned char c){ return std::toupper(c); }); - protonCfg->indexing.optimize = ProtonConfig::Indexing::getOptimize(sequencer); - } - auto bootstrap_config = std::make_shared<BootstrapConfig>(1, - _document_types, - _repo, - std::move(protonCfg), - std::make_shared<FiledistributorrpcConfig>(), - std::make_shared<BucketspacesConfig>(), - tuneFileDocDB, HwInfo()); - mgr.forwardConfig(bootstrap_config); - mgr.nextGeneration(0ms); - _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name, - _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner, - _summary_executor, _summary_executor, *_persistence_engine, _tls, - _metrics_wire_service, _file_header_context, - _config_stores.getConfigStore(_doc_type_name.toString()), - std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo()); - _document_db->start(); - _document_db->waitForOnlineState(); -} - -DocumentId -PersistenceProviderFixture::make_document_id(uint32_t n, uint32_t i) const -{ - DocumentId id(vespalib::make_string("id::test:n=%u:%u", n & (num_buckets() - 1), i)); - return id; -} - -std::unique_ptr<Document> -PersistenceProviderFixture::make_document(uint32_t n, uint32_t i) const -{ - auto id = make_document_id(n, i); - auto document = 
std::make_unique<Document>(*_document_type, id); - document->setRepo(*_repo); - document->setFieldValue(_field, std::make_unique<IntFieldValue>(i)); - return document; -} - -std::unique_ptr<DocumentUpdate> -PersistenceProviderFixture::make_document_update(uint32_t n, uint32_t i) const -{ - auto id = make_document_id(n, i); - auto document_update = std::make_unique<DocumentUpdate>(*_repo, *_document_type, id); - document_update->addUpdate(FieldUpdate(_field).addUpdate(AssignValueUpdate(IntFieldValue(15)))); - return document_update; -} - -void -PersistenceProviderFixture::create_buckets() -{ - SpiBmFeedHandler feed_handler(*_persistence_engine, *_field_set_repo, false); - for (unsigned int i = 0; i < num_buckets(); ++i) { - feed_handler.create_bucket(make_bucket(i)); - } -} - -void -PersistenceProviderFixture::wait_slobrok(const vespalib::string &name) -{ - auto &mirror = _rpc_client_shared_rpc_resources->slobrok_mirror(); - LOG(info, "Waiting for %s in slobrok", name.c_str()); - for (;;) { - auto specs = mirror.lookup(name); - if (!specs.empty()) { - LOG(info, "Found %s in slobrok", name.c_str()); - return; - } - std::this_thread::sleep_for(100ms); - } -} - -void -PersistenceProviderFixture::start_service_layer(const BMParams& params) -{ - LOG(info, "start slobrok"); - _slobrok = std::make_unique<mbus::Slobrok>(_slobrok_port); - LOG(info, "start service layer"); - config::ConfigUri config_uri("bm-servicelayer", _config_context); - std::unique_ptr<BmStorageChainBuilder> chain_builder; - if (params.get_use_storage_chain() && !params.needs_distributor()) { - chain_builder = std::make_unique<BmStorageChainBuilder>(); - _service_layer_chain_context = chain_builder->get_context(); - } - _service_layer = std::make_unique<MyServiceLayerProcess>(config_uri, - *_persistence_engine, - std::move(chain_builder)); - _service_layer->setupConfig(100ms); - _service_layer->createNode(); - _service_layer->getNode().waitUntilInitialized(); - LOG(info, "start rpc client shared 
resources"); - config::ConfigUri client_config_uri("bm-rpc-client", _config_context); - _rpc_client_shared_rpc_resources = std::make_unique<SharedRpcResources> - (client_config_uri, _rpc_client_port, 100, params.get_rpc_events_before_wakup()); - _rpc_client_shared_rpc_resources->start_server_and_register_slobrok("bm-rpc-client"); - wait_slobrok("storage/cluster.storage/storage/0/default"); - wait_slobrok("storage/cluster.storage/storage/0"); - BmClusterController fake_controller(*_rpc_client_shared_rpc_resources); - fake_controller.set_cluster_up(false); -} - -void -PersistenceProviderFixture::start_distributor(const BMParams& params) -{ - config::ConfigUri config_uri("bm-distributor", _config_context); - std::unique_ptr<BmStorageChainBuilder> chain_builder; - if (params.get_use_storage_chain() && !params.get_use_document_api()) { - chain_builder = std::make_unique<BmStorageChainBuilder>(); - _distributor_chain_context = chain_builder->get_context(); - } - _distributor = std::make_unique<storage::DistributorProcess>(config_uri); - if (chain_builder) { - _distributor->set_storage_chain_builder(std::move(chain_builder)); - } - _distributor->setupConfig(100ms); - _distributor->createNode(); - wait_slobrok("storage/cluster.storage/distributor/0/default"); - wait_slobrok("storage/cluster.storage/distributor/0"); - BmClusterController fake_controller(*_rpc_client_shared_rpc_resources); - fake_controller.set_cluster_up(true); - // Wait for bucket ownership transfer safe time - std::this_thread::sleep_for(2s); -} - -void -PersistenceProviderFixture::start_message_bus() -{ - config::ConfigUri config_uri("bm-message-bus", _config_context); - LOG(info, "Starting message bus"); - _message_bus = std::make_unique<BmMessageBus>(config_uri, _repo); - LOG(info, "Started message bus"); -} - -void -PersistenceProviderFixture::create_feed_handler(const BMParams& params) -{ - StorageApiRpcService::Params rpc_params; - // This is the same compression config as the default in 
stor-communicationmanager.def. - rpc_params.compression_config = CompressionConfig(CompressionConfig::Type::LZ4, 3, 90, 1024); - rpc_params.num_rpc_targets_per_node = params.get_rpc_targets_per_node(); - if (params.get_use_document_api()) { - _feed_handler = std::make_unique<DocumentApiMessageBusBmFeedHandler>(*_message_bus); - } else if (params.get_enable_distributor()) { - if (params.get_use_storage_chain()) { - assert(_distributor_chain_context); - _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_distributor_chain_context, true); - } else if (params.get_use_message_bus()) { - _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(*_message_bus, true); - } else { - _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(*_rpc_client_shared_rpc_resources, _repo, rpc_params, true); - } - } else if (params.needs_service_layer()) { - if (params.get_use_storage_chain()) { - assert(_service_layer_chain_context); - _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_service_layer_chain_context, false); - } else if (params.get_use_message_bus()) { - _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(*_message_bus, false); - } else { - _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(*_rpc_client_shared_rpc_resources, _repo, rpc_params, false); - } - } -} - -void -PersistenceProviderFixture::shutdown_feed_handler() -{ - _feed_handler.reset(); -} - -void -PersistenceProviderFixture::shutdown_message_bus() -{ - if (_message_bus) { - LOG(info, "stop message bus"); - _message_bus.reset(); - } -} - -void -PersistenceProviderFixture::shutdown_distributor() -{ - if (_distributor) { - LOG(info, "stop distributor"); - _distributor->getNode().requestShutdown("controlled shutdown"); - _distributor->shutdown(); - } -} - -void -PersistenceProviderFixture::shutdown_service_layer() -{ - if (_rpc_client_shared_rpc_resources) { - LOG(info, "stop rpc client shared resources"); - 
_rpc_client_shared_rpc_resources->shutdown(); - _rpc_client_shared_rpc_resources.reset(); - } - if (_service_layer) { - LOG(info, "stop service layer"); - _service_layer->getNode().requestShutdown("controlled shutdown"); - _service_layer->shutdown(); - } - if (_slobrok) { - LOG(info, "stop slobrok"); - _slobrok.reset(); - } -} - -vespalib::nbostream -make_put_feed(PersistenceProviderFixture &f, BMRange range, BucketSelector bucket_selector) -{ - vespalib::nbostream serialized_feed; - LOG(debug, "make_put_feed([%u..%u))", range.get_start(), range.get_end()); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - auto n = bucket_selector(i); - serialized_feed << f.make_bucket_id(n); - auto document = f.make_document(n, i); - document->serialize(serialized_feed); - } - return serialized_feed; -} +PersistenceProviderFixture::~PersistenceProviderFixture() = default; std::vector<vespalib::nbostream> -make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, std::function<vespalib::nbostream(BMRange,BucketSelector)> func, uint32_t num_buckets, const vespalib::string &label) +make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, std::function<vespalib::nbostream(BmRange,BucketSelector)> func, uint32_t num_buckets, const vespalib::string &label) { LOG(info, "make_feed %s %u small documents", label.c_str(), bm_params.get_documents()); std::vector<vespalib::nbostream> serialized_feed_v; @@ -1038,27 +171,6 @@ make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, st return serialized_feed_v; } -void -put_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) -{ - LOG(debug, "put_async_task([%u..%u))", range.get_start(), range.get_end()); - feedbm::PendingTracker pending_tracker(max_pending); - f._feed_handler->attach_bucket_info_queue(pending_tracker); - auto &repo = *f._repo; - vespalib::nbostream 
is(serialized_feed.data(), serialized_feed.size()); - BucketId bucket_id; - auto bucket_space = f._bucket_space; - bool use_timestamp = !f._feed_handler->manages_timestamp(); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - is >> bucket_id; - document::Bucket bucket(bucket_space, bucket_id); - auto document = std::make_unique<Document>(repo, is); - f._feed_handler->put(bucket, std::move(document), (use_timestamp ? (time_bias + i) : 0), pending_tracker); - } - assert(is.empty()); - pending_tracker.drain(); -} - class AvgSampler { private: double _total; @@ -1077,73 +189,42 @@ void run_put_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler) { - uint32_t old_errors = f._feed_handler->get_error_count(); + auto& feed = f._feed; + auto& feed_handler = *f._feed_handler; + uint32_t old_errors = feed_handler.get_error_count(); auto start_time = std::chrono::steady_clock::now(); for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) { auto range = bm_params.get_range(i); - executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() - { put_async_task(f, max_pending, range, serialized_feed, time_bias); })); + executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() + { feed.put_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); })); } executor.sync(); auto end_time = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed = end_time - start_time; - uint32_t new_errors = f._feed_handler->get_error_count() - old_errors; + uint32_t new_errors = feed_handler.get_error_count() - old_errors; double throughput = bm_params.get_documents() / elapsed.count(); 
sampler.sample(throughput); LOG(info, "putAsync: pass=%u, errors=%u, puts/s: %8.2f", pass, new_errors, throughput); time_bias += bm_params.get_documents(); } -vespalib::nbostream -make_update_feed(PersistenceProviderFixture &f, BMRange range, BucketSelector bucket_selector) -{ - vespalib::nbostream serialized_feed; - LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end()); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - auto n = bucket_selector(i); - serialized_feed << f.make_bucket_id(n); - auto document_update = f.make_document_update(n, i); - document_update->serializeHEAD(serialized_feed); - } - return serialized_feed; -} - -void -update_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) -{ - LOG(debug, "update_async_task([%u..%u))", range.get_start(), range.get_end()); - feedbm::PendingTracker pending_tracker(max_pending); - f._feed_handler->attach_bucket_info_queue(pending_tracker); - auto &repo = *f._repo; - vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); - BucketId bucket_id; - auto bucket_space = f._bucket_space; - bool use_timestamp = !f._feed_handler->manages_timestamp(); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - is >> bucket_id; - document::Bucket bucket(bucket_space, bucket_id); - auto document_update = DocumentUpdate::createHEAD(repo, is); - f._feed_handler->update(bucket, std::move(document_update), (use_timestamp ? 
(time_bias + i) : 0), pending_tracker); - } - assert(is.empty()); - pending_tracker.drain(); -} - void run_update_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler) { - uint32_t old_errors = f._feed_handler->get_error_count(); + auto& feed = f._feed; + auto& feed_handler = *f._feed_handler; + uint32_t old_errors = feed_handler.get_error_count(); auto start_time = std::chrono::steady_clock::now(); for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) { auto range = bm_params.get_range(i); - executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() - { update_async_task(f, max_pending, range, serialized_feed, time_bias); })); + executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() + { feed.update_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); })); } executor.sync(); auto end_time = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed = end_time - start_time; - uint32_t new_errors = f._feed_handler->get_error_count() - old_errors; + uint32_t new_errors = feed_handler.get_error_count() - old_errors; double throughput = bm_params.get_documents() / elapsed.count(); sampler.sample(throughput); LOG(info, "updateAsync: pass=%u, errors=%u, updates/s: %8.2f", pass, new_errors, throughput); @@ -1151,94 +232,44 @@ run_update_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecu } void -get_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed) -{ - LOG(debug, "get_async_task([%u..%u))", range.get_start(), range.get_end()); - feedbm::PendingTracker pending_tracker(max_pending); - 
vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); - BucketId bucket_id; - vespalib::string all_fields(document::AllFields::NAME); - auto bucket_space = f._bucket_space; - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - is >> bucket_id; - document::Bucket bucket(bucket_space, bucket_id); - DocumentId document_id(is); - f._feed_handler->get(bucket, all_fields, document_id, pending_tracker); - } - assert(is.empty()); - pending_tracker.drain(); -} - -void run_get_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler) { - uint32_t old_errors = f._feed_handler->get_error_count(); + auto& feed = f._feed; + auto& feed_handler = *f._feed_handler; + uint32_t old_errors = feed_handler.get_error_count(); auto start_time = std::chrono::steady_clock::now(); for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) { auto range = bm_params.get_range(i); - executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range]() - { get_async_task(f, max_pending, range, serialized_feed); })); + executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range]() + { feed.get_async_task(feed_handler, max_pending, range, serialized_feed); })); } executor.sync(); auto end_time = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed = end_time - start_time; - uint32_t new_errors = f._feed_handler->get_error_count() - old_errors; + uint32_t new_errors = feed_handler.get_error_count() - old_errors; double throughput = bm_params.get_documents() / elapsed.count(); sampler.sample(throughput); LOG(info, "getAsync: pass=%u, errors=%u, gets/s: %8.2f", pass, new_errors, throughput); } -vespalib::nbostream -make_remove_feed(PersistenceProviderFixture 
&f, BMRange range, BucketSelector bucket_selector) -{ - vespalib::nbostream serialized_feed; - LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end()); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - auto n = bucket_selector(i); - serialized_feed << f.make_bucket_id(n); - auto document_id = f.make_document_id(n, i); - vespalib::string raw_id = document_id.toString(); - serialized_feed.write(raw_id.c_str(), raw_id.size() + 1); - } - return serialized_feed; -} - -void -remove_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) -{ - LOG(debug, "remove_async_task([%u..%u))", range.get_start(), range.get_end()); - feedbm::PendingTracker pending_tracker(max_pending); - f._feed_handler->attach_bucket_info_queue(pending_tracker); - vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); - BucketId bucket_id; - auto bucket_space = f._bucket_space; - bool use_timestamp = !f._feed_handler->manages_timestamp(); - for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { - is >> bucket_id; - document::Bucket bucket(bucket_space, bucket_id); - DocumentId document_id(is); - f._feed_handler->remove(bucket, document_id, (use_timestamp ? 
(time_bias + i) : 0), pending_tracker); - } - assert(is.empty()); - pending_tracker.drain(); -} - void run_remove_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler) { - uint32_t old_errors = f._feed_handler->get_error_count(); + auto& feed = f._feed; + auto& feed_handler = *f._feed_handler; + uint32_t old_errors = feed_handler.get_error_count(); auto start_time = std::chrono::steady_clock::now(); for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) { auto range = bm_params.get_range(i); - executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() - { remove_async_task(f, max_pending, range, serialized_feed, time_bias); })); + executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]() + { feed.remove_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); })); } executor.sync(); auto end_time = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed = end_time - start_time; - uint32_t new_errors = f._feed_handler->get_error_count() - old_errors; + uint32_t new_errors = feed_handler.get_error_count() - old_errors; double throughput = bm_params.get_documents() / elapsed.count(); sampler.sample(throughput); LOG(info, "removeAsync: pass=%u, errors=%u, removes/s: %8.2f", pass, new_errors, throughput); @@ -1310,27 +341,14 @@ void benchmark_async_spi(const BMParams &bm_params) { vespalib::rmdir(base_dir, true); PersistenceProviderFixture f(bm_params); - auto &provider = *f._persistence_engine; - LOG(info, "start initialize"); - provider.initialize(); - LOG(info, "create %u buckets", f.num_buckets()); - if (!bm_params.needs_distributor()) { - f.create_buckets(); - } - if 
(bm_params.needs_service_layer()) { - f.start_service_layer(bm_params); - } - if (bm_params.needs_distributor()) { - f.start_distributor(bm_params); - } - if (bm_params.needs_message_bus()) { - f.start_message_bus(); - } - f.create_feed_handler(bm_params); + auto& cluster = *f._bm_cluster; + cluster.start(f._feed); + f._feed_handler = cluster.get_feed_handler(); vespalib::ThreadStackExecutor executor(bm_params.get_client_threads(), 128_Ki); - auto put_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_put_feed(f, range, bucket_selector); }, f.num_buckets(), "put"); - auto update_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_update_feed(f, range, bucket_selector); }, f.num_buckets(), "update"); - auto remove_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_remove_feed(f, range, bucket_selector); }, f.num_buckets(), "remove"); + auto& feed = f._feed; + auto put_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_put_feed(range, bucket_selector); }, f._feed.num_buckets(), "put"); + auto update_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_update_feed(range, bucket_selector); }, f._feed.num_buckets(), "update"); + auto remove_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_remove_feed(range, bucket_selector); }, f._feed.num_buckets(), "remove"); int64_t time_bias = 1; LOG(info, "Feed handler is '%s'", f._feed_handler->get_name().c_str()); benchmark_async_put(f, executor, time_bias, put_feed, bm_params); @@ -1339,10 +357,8 @@ void benchmark_async_spi(const BMParams &bm_params) benchmark_async_remove(f, executor, time_bias, remove_feed, bm_params); LOG(info, "--------------------------------"); - 
f.shutdown_feed_handler(); - f.shutdown_message_bus(); - f.shutdown_distributor(); - f.shutdown_service_layer(); + f._feed_handler = nullptr; + cluster.stop(); } class App : public FastOS_Application @@ -1412,6 +428,7 @@ App::get_options() { "get-passes", 1, nullptr, 0 }, { "indexing-sequencer", 1, nullptr, 0 }, { "max-pending", 1, nullptr, 0 }, + { "nodes", 1, nullptr, 0 }, { "put-passes", 1, nullptr, 0 }, { "remove-passes", 1, nullptr, 0 }, { "response-threads", 1, nullptr, 0 }, @@ -1436,6 +453,7 @@ App::get_options() LONGOPT_GET_PASSES, LONGOPT_INDEXING_SEQUENCER, LONGOPT_MAX_PENDING, + LONGOPT_NODES, LONGOPT_PUT_PASSES, LONGOPT_REMOVE_PASSES, LONGOPT_RESPONSE_THREADS, @@ -1483,6 +501,9 @@ App::get_options() case LONGOPT_MAX_PENDING: _bm_params.set_max_pending(atoi(opt_argument)); break; + case LONGOPT_NODES: + _bm_params.set_num_nodes(atoi(opt_argument)); + break; case LONGOPT_PUT_PASSES: _bm_params.set_put_passes(atoi(opt_argument)); break; diff --git a/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt b/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt new file mode 100644 index 00000000000..8d6dbd0f938 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt @@ -0,0 +1,42 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_library(searchcore_bmcluster STATIC + SOURCES + bm_cluster.cpp + bm_cluster_controller.cpp + bm_cluster_params.cpp + bm_feed.cpp + bm_message_bus.cpp + bm_node.cpp + bm_storage_chain_builder.cpp + bm_storage_link.cpp + bucket_info_queue.cpp + document_api_message_bus_bm_feed_handler.cpp + pending_tracker.cpp + pending_tracker_hash.cpp + spi_bm_feed_handler.cpp + storage_api_chain_bm_feed_handler.cpp + storage_api_message_bus_bm_feed_handler.cpp + storage_api_rpc_bm_feed_handler.cpp + storage_reply_error_checker.cpp + DEPENDS + searchcore_server + searchcore_initializer + searchcore_reprocessing + searchcore_index + searchcore_persistenceengine + searchcore_docsummary + searchcore_feedoperation + searchcore_matching + searchcore_attribute + searchcore_documentmetastore + searchcore_bucketdb + searchcore_flushengine + searchcore_pcommon + searchcore_grouping + searchcore_proton_metrics + searchcore_fconfig + storageserver_storageapp + messagebus_messagebus-test + messagebus + searchlib_searchlib_uca +) diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp new file mode 100644 index 00000000000..58011d9c67a --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp @@ -0,0 +1,384 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "bm_cluster.h" +#include "bm_cluster_controller.h" +#include "bm_feed.h" +#include "bm_message_bus.h" +#include "bm_node.h" +#include "spi_bm_feed_handler.h" +#include <vespa/config/common/configcontext.h> +#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h> +#include <vespa/messagebus/config-messagebus.h> +#include <vespa/messagebus/testlib/slobrok.h> +#include <vespa/slobrok/sbmirror.h> +#include <vespa/vespalib/io/fileutil.h> +#include <vespa/vespalib/stllike/asciistream.h> +#include <vespa/vespalib/util/stringfmt.h> +#include <thread> + +#include <vespa/log/log.h> +LOG_SETUP(".bmcluster.bm_cluster"); + +using cloud::config::SlobroksConfigBuilder; +using config::ConfigSet; +using messagebus::MessagebusConfigBuilder; +using storage::rpc::SharedRpcResources; + +namespace search::bmcluster { + +namespace { + +vespalib::string message_bus_config_id("bm-message-bus"); +vespalib::string rpc_client_config_id("bm-rpc-client"); + +enum class PortBias +{ + SLOBROK_PORT = 0, + RPC_CLIENT_PORT = 1, + NUM_PORTS = 2 +}; + +int port_number(int base_port, PortBias bias) +{ + return base_port + static_cast<int>(bias); +} + +void +make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port) +{ + SlobroksConfigBuilder::Slobrok slobrok; + slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port); + slobroks.slobrok.push_back(std::move(slobrok)); +} + +} + +struct BmCluster::MessageBusConfigSet { + vespalib::string config_id; + SlobroksConfigBuilder slobroks; + MessagebusConfigBuilder messagebus; + + MessageBusConfigSet(const vespalib::string &config_id_in, int slobrok_port) + : config_id(config_id_in), + slobroks(), + messagebus() + { + make_slobroks_config(slobroks, slobrok_port); + } + ~MessageBusConfigSet(); + + void add_builders(ConfigSet &set) { + set.addBuilder(config_id, &slobroks); + set.addBuilder(config_id, &messagebus); + } +}; + +BmCluster::MessageBusConfigSet::~MessageBusConfigSet() = default; + +struct 
BmCluster::RpcClientConfigSet { + vespalib::string config_id; + SlobroksConfigBuilder slobroks; + + RpcClientConfigSet(const vespalib::string &config_id_in, int slobrok_port) + : config_id(config_id_in), + slobroks() + { + make_slobroks_config(slobroks, slobrok_port); + } + ~RpcClientConfigSet(); + + void add_builders(ConfigSet &set) { + set.addBuilder(config_id, &slobroks); + } +}; + +BmCluster::RpcClientConfigSet::~RpcClientConfigSet() = default; + +BmCluster::BmCluster(const vespalib::string& base_dir, int base_port, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const document::DocumentTypeRepo> repo) + : _params(params), + _slobrok_port(port_number(base_port, PortBias::SLOBROK_PORT)), + _rpc_client_port(port_number(base_port, PortBias::RPC_CLIENT_PORT)), + _message_bus_config(std::make_unique<MessageBusConfigSet>(message_bus_config_id, _slobrok_port)), + _rpc_client_config(std::make_unique<RpcClientConfigSet>(rpc_client_config_id, _slobrok_port)), + _config_set(std::make_unique<config::ConfigSet>()), + _config_context(std::make_shared<config::ConfigContext>(*_config_set)), + _slobrok(), + _message_bus(), + _rpc_client(), + _base_dir(base_dir), + _base_port(base_port), + _document_types(std::move(document_types)), + _repo(std::move(repo)), + _nodes(params.get_num_nodes()) + +{ + _message_bus_config->add_builders(*_config_set); + _rpc_client_config->add_builders(*_config_set); + vespalib::mkdir(_base_dir, false); +} + +BmCluster::~BmCluster() +{ + _nodes.clear(); + stop_message_bus(); + stop_rpc_client(); + stop_slobrok(); +} + + +void +BmCluster::start_slobrok() +{ + if (!_slobrok) { + LOG(info, "start slobrok"); + _slobrok = std::make_unique<mbus::Slobrok>(_slobrok_port); + } +} + +void +BmCluster::stop_slobrok() +{ + if (_slobrok) { + LOG(info, "stop slobrok"); + _slobrok.reset(); + } +} + +void +BmCluster::wait_slobrok(const vespalib::string &name) +{ + auto &mirror = _rpc_client->slobrok_mirror(); + 
LOG(info, "Waiting for %s in slobrok", name.c_str()); + for (;;) { + auto specs = mirror.lookup(name); + if (!specs.empty()) { + LOG(info, "Found %s in slobrok", name.c_str()); + return; + } + std::this_thread::sleep_for(100ms); + } +} + +void +BmCluster::start_message_bus() +{ + if (!_message_bus) { + LOG(info, "Starting message bus"); + config::ConfigUri config_uri(message_bus_config_id, _config_context); + _message_bus = std::make_unique<BmMessageBus>(config_uri, _repo); + LOG(info, "Started message bus"); + } +} + +void +BmCluster::stop_message_bus() +{ + if (_message_bus) { + LOG(info, "stop message bus"); + _message_bus.reset(); + } +} + +void +BmCluster::start_rpc_client() +{ + if (!_rpc_client) { + LOG(info, "start rpc client"); + config::ConfigUri client_config_uri(rpc_client_config_id, _config_context); + _rpc_client = std::make_unique<SharedRpcResources> + (client_config_uri, _rpc_client_port, 100, _params.get_rpc_events_before_wakeup()); + _rpc_client->start_server_and_register_slobrok(rpc_client_config_id); + } +} + +void +BmCluster::stop_rpc_client() +{ + if (_rpc_client) { + LOG(info, "stop rpc client"); + _rpc_client->shutdown(); + _rpc_client.reset(); + } +} + +void +BmCluster::make_node(unsigned int node_idx) +{ + assert(node_idx < _nodes.size()); + assert(!_nodes[node_idx]); + vespalib::asciistream s; + s << _base_dir << "/n" << node_idx; + vespalib::string node_base_dir(s.str()); + int node_base_port = port_number(_base_port, PortBias::NUM_PORTS) + BmNode::num_ports() * node_idx; + _nodes[node_idx] = BmNode::create(node_base_dir, node_base_port, node_idx, *this, _params, _document_types, _slobrok_port); +} + +void +BmCluster::make_nodes() +{ + for (unsigned int node_idx = 0; node_idx < _nodes.size(); ++node_idx) { + make_node(node_idx); + } +} + +BmNode& +BmCluster::get_node(unsigned int node_idx) +{ + assert(node_idx < _nodes.size()); + assert(_nodes[node_idx]); + return *_nodes[node_idx]; +} + +void +BmCluster::initialize_providers() +{ + 
LOG(info, "start initialize"); + for (const auto &node : _nodes) { + if (node) { + node->initialize_persistence_provider(); + } + } +} + +void +BmCluster::create_buckets(BmFeed& feed) +{ + LOG(info, "create %u buckets", feed.num_buckets()); + auto& node = get_node(0); + for (unsigned int i = 0; i < feed.num_buckets(); ++i) { + node.create_bucket(feed.make_bucket(i)); + } +} + +void +BmCluster::start_service_layers() +{ + start_slobrok(); + for (const auto &node : _nodes) { + if (node) { + node->start_service_layer(_params); + } + } + for (const auto &node : _nodes) { + if (node) { + node->wait_service_layer(); + } + } + start_rpc_client(); + for (const auto &node : _nodes) { + if (node) { + node->wait_service_layer_slobrok(); + } + } + BmClusterController fake_controller(get_rpc_client(), _params.get_num_nodes()); + unsigned int node_idx = 0; + for (const auto &node : _nodes) { + if (node) { + fake_controller.set_cluster_up(node_idx, false); + } + ++node_idx; + } +} + +void +BmCluster::start_distributors() +{ + for (const auto &node : _nodes) { + if (node) { + node->start_distributor(_params); + } + } + for (const auto &node : _nodes) { + if (node) { + node->wait_distributor_slobrok(); + } + } + BmClusterController fake_controller(get_rpc_client(), _params.get_num_nodes()); + unsigned int node_idx = 0; + for (const auto &node : _nodes) { + if (node) { + fake_controller.set_cluster_up(node_idx, true); + } + ++node_idx; + } + // Wait for bucket ownership transfer safe time + std::this_thread::sleep_for(2s); +} + +void +BmCluster::create_feed_handlers() +{ + for (const auto &node : _nodes) { + if (node) { + node->create_feed_handler(_params); + } + } +} + +void +BmCluster::shutdown_feed_handlers() +{ + for (const auto &node : _nodes) { + if (node) { + node->shutdown_feed_handler(); + } + } +} + +void +BmCluster::shutdown_distributors() +{ + for (const auto &node : _nodes) { + if (node) { + node->shutdown_distributor(); + } + } +} + +void 
+BmCluster::shutdown_service_layers() +{ + stop_rpc_client(); + for (const auto &node : _nodes) { + if (node) { + node->shutdown_service_layer(); + } + } + stop_slobrok(); +} + +void +BmCluster::start(BmFeed& feed) +{ + initialize_providers(); + if (!_params.needs_distributor()) { + create_buckets(feed); + } + if (_params.needs_service_layer()) { + start_service_layers(); + } + if (_params.needs_distributor()) { + start_distributors(); + } + if (_params.needs_message_bus()) { + start_message_bus(); + } + create_feed_handlers(); +} + +void +BmCluster::stop() +{ + shutdown_feed_handlers(); + stop_message_bus(); + shutdown_distributors(); + shutdown_service_layers(); +} + +IBmFeedHandler* +BmCluster::get_feed_handler() +{ + return get_node(0).get_feed_handler(); +} + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h new file mode 100644 index 00000000000..0e2e138ab08 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h @@ -0,0 +1,79 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "bm_cluster_params.h" +#include <memory> +#include <vector> + +namespace config { + +class IConfigContext; +class ConfigSet; + +} + +namespace document { class DocumentTypeRepo; } +namespace document::internal { class InternalDocumenttypesType; } +namespace mbus { class Slobrok; } +namespace storage::rpc { class SharedRpcResources; } + +namespace search::bmcluster { + +class BmFeed; +class BmMessageBus; +class BmNode; +class IBmFeedHandler; + +/* + * Class representing a benchmark cluster with one or more benchmark nodes. 
+ */ +class BmCluster { + struct MessageBusConfigSet; + struct RpcClientConfigSet; + using DocumenttypesConfig = const document::internal::InternalDocumenttypesType; + BmClusterParams _params; + int _slobrok_port; + int _rpc_client_port; + std::unique_ptr<MessageBusConfigSet> _message_bus_config; + std::unique_ptr<RpcClientConfigSet> _rpc_client_config; + std::unique_ptr<config::ConfigSet> _config_set; + std::shared_ptr<config::IConfigContext> _config_context; + std::unique_ptr<mbus::Slobrok> _slobrok; + std::unique_ptr<BmMessageBus> _message_bus; + std::unique_ptr<storage::rpc::SharedRpcResources> _rpc_client; + vespalib::string _base_dir; + int _base_port; + std::shared_ptr<DocumenttypesConfig> _document_types; + std::shared_ptr<const document::DocumentTypeRepo> _repo; + std::vector<std::unique_ptr<BmNode>> _nodes; + +public: + BmCluster(const vespalib::string& base_dir, int base_port, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const document::DocumentTypeRepo> repo); + ~BmCluster(); + void start_slobrok(); + void stop_slobrok(); + void wait_slobrok(const vespalib::string &name); + void start_message_bus(); + void stop_message_bus(); + void start_rpc_client(); + void stop_rpc_client(); + void start_service_layers(); + void start_distributors(); + void create_feed_handlers(); + void shutdown_feed_handlers(); + void shutdown_distributors(); + void shutdown_service_layers(); + void create_buckets(BmFeed &feed); + void initialize_providers(); + void start(BmFeed &feed); + void stop(); + storage::rpc::SharedRpcResources &get_rpc_client() { return *_rpc_client; } + BmMessageBus& get_message_bus() { return *_message_bus; } + void make_node(unsigned int node_idx); + void make_nodes(); + BmNode& get_node(unsigned int node_idx); + IBmFeedHandler* get_feed_handler(); +}; + +} diff --git a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.cpp 
index a1b40c56e11..bdf4b5fed58 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.cpp @@ -8,19 +8,22 @@ #include <vespa/vdslib/state/cluster_state_bundle.h> #include <vespa/fnet/frt/target.h> #include <vespa/slobrok/sbmirror.h> +#include <vespa/vespalib/stllike/asciistream.h> using storage::api::StorageMessageAddress; using storage::rpc::SharedRpcResources; using storage::lib::NodeType; -namespace feedbm { +namespace search::bmcluster { namespace { FRT_RPCRequest * -make_set_cluster_state_request() +make_set_cluster_state_request(unsigned int num_nodes) { - storage::lib::ClusterStateBundle bundle(storage::lib::ClusterState("version:2 distributor:1 storage:1")); + vespalib::asciistream s; + s << "version:2 distributor:" << num_nodes << " storage:" << num_nodes; + storage::lib::ClusterStateBundle bundle(storage::lib::ClusterState(s.str())); storage::rpc::SlimeClusterStateBundleCodec codec; auto encoded_bundle = codec.encode(bundle); auto *req = new FRT_RPCRequest(); @@ -34,17 +37,18 @@ make_set_cluster_state_request() } -BmClusterController::BmClusterController(SharedRpcResources& shared_rpc_resources_in) - : _shared_rpc_resources(shared_rpc_resources_in) +BmClusterController::BmClusterController(SharedRpcResources& shared_rpc_resources_in, unsigned int num_nodes) + : _shared_rpc_resources(shared_rpc_resources_in), + _num_nodes(num_nodes) { } void -BmClusterController::set_cluster_up(bool distributor) +BmClusterController::set_cluster_up(unsigned int node_idx, bool distributor) { static vespalib::string _storage("storage"); - StorageMessageAddress storage_address(&_storage, distributor ? NodeType::DISTRIBUTOR : NodeType::STORAGE, 0); - auto req = make_set_cluster_state_request(); + StorageMessageAddress storage_address(&_storage, distributor ? 
NodeType::DISTRIBUTOR : NodeType::STORAGE, node_idx); + auto req = make_set_cluster_state_request(_num_nodes); auto target_resolver = std::make_unique<storage::rpc::CachingRpcTargetResolver>(_shared_rpc_resources.slobrok_mirror(), _shared_rpc_resources.target_factory(), 1); uint64_t fake_bucket_id = 0; diff --git a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.h index 699036be5c9..7b4313453f1 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.h @@ -5,7 +5,7 @@ namespace storage::api { class StorageMessageAddress; } namespace storage::rpc { class SharedRpcResources; } -namespace feedbm { +namespace search::bmcluster { /* * Fake cluster controller that sets cluster state to be up. @@ -13,9 +13,10 @@ namespace feedbm { class BmClusterController { storage::rpc::SharedRpcResources& _shared_rpc_resources; + unsigned int _num_nodes; public: - BmClusterController(storage::rpc::SharedRpcResources& shared_rpc_resources_in); - void set_cluster_up(bool distributor); + BmClusterController(storage::rpc::SharedRpcResources& shared_rpc_resources_in, unsigned int num_nodes); + void set_cluster_up(unsigned int node_idx, bool distributor); }; } diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp new file mode 100644 index 00000000000..7766ab6c5b3 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp @@ -0,0 +1,48 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "bm_cluster_params.h" +#include <iostream> + +namespace search::bmcluster { + +BmClusterParams::BmClusterParams() + : _bucket_db_stripe_bits(0), + _distributor_stripes(0), + _enable_distributor(false), + _enable_service_layer(false), + _indexing_sequencer(), + _num_nodes(1), + _response_threads(2), // Same default as in stor-filestor.def + _rpc_events_before_wakeup(1), // Same default as in stor-communicationmanager.def + _rpc_network_threads(1), // Same default as previous in stor-communicationmanager.def + _rpc_targets_per_node(1), // Same default as in stor-communicationmanager.def + _skip_communicationmanager_thread(false), // Same default as in stor-communicationmanager.def + _skip_get_spi_bucket_info(false), + _use_async_message_handling_on_schedule(false), + _use_document_api(false), + _use_message_bus(false), + _use_storage_chain(false) +{ +} + +BmClusterParams::~BmClusterParams() = default; + +bool +BmClusterParams::check() const +{ + if (_response_threads < 1) { + std::cerr << "Too few response threads: " << _response_threads << std::endl; + return false; + } + if (_rpc_network_threads < 1) { + std::cerr << "Too few rpc network threads: " << _rpc_network_threads << std::endl; + return false; + } + if (_rpc_targets_per_node < 1) { + std::cerr << "Too few rpc targets per node: " << _rpc_targets_per_node << std::endl; + return false; + } + return true; +} + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h new file mode 100644 index 00000000000..5bc6b97487c --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h @@ -0,0 +1,71 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <cstdint> +#include <vespa/vespalib/stllike/string.h> + +namespace search::bmcluster { + +/* + * Parameters for setting up a benchmark cluster. 
+ */ +class BmClusterParams +{ + uint32_t _bucket_db_stripe_bits; + uint32_t _distributor_stripes; + bool _enable_distributor; + bool _enable_service_layer; + vespalib::string _indexing_sequencer; + uint32_t _num_nodes; + uint32_t _response_threads; + uint32_t _rpc_events_before_wakeup; + uint32_t _rpc_network_threads; + uint32_t _rpc_targets_per_node; + bool _skip_communicationmanager_thread; + bool _skip_get_spi_bucket_info; + bool _use_async_message_handling_on_schedule; + bool _use_document_api; + bool _use_message_bus; + bool _use_storage_chain; +public: + BmClusterParams(); + ~BmClusterParams(); + uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; } + uint32_t get_distributor_stripes() const { return _distributor_stripes; } + bool get_enable_distributor() const { return _enable_distributor; } + const vespalib::string & get_indexing_sequencer() const { return _indexing_sequencer; } + uint32_t get_num_nodes() const { return _num_nodes; } + uint32_t get_response_threads() const { return _response_threads; } + uint32_t get_rpc_events_before_wakeup() const { return _rpc_events_before_wakeup; } + uint32_t get_rpc_network_threads() const { return _rpc_network_threads; } + uint32_t get_rpc_targets_per_node() const { return _rpc_targets_per_node; } + bool get_skip_communicationmanager_thread() const { return _skip_communicationmanager_thread; } + bool get_skip_get_spi_bucket_info() const { return _skip_get_spi_bucket_info; } + bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; } + bool get_use_document_api() const { return _use_document_api; } + bool get_use_message_bus() const { return _use_message_bus; } + bool get_use_storage_chain() const { return _use_storage_chain; } + bool needs_distributor() const { return _enable_distributor || _use_document_api; } + bool needs_message_bus() const { return _use_message_bus || _use_document_api; } + bool needs_service_layer() const { return 
_enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; } + void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; } + void set_distributor_stripes(uint32_t value) { _distributor_stripes = value; } + void set_enable_distributor(bool value) { _enable_distributor = value; } + void set_enable_service_layer(bool value) { _enable_service_layer = value; } + void set_indexing_sequencer(vespalib::stringref sequencer) { _indexing_sequencer = sequencer; } + void set_num_nodes(uint32_t value) { _num_nodes = value; } + void set_response_threads(uint32_t threads_in) { _response_threads = threads_in; } + void set_rpc_events_before_wakeup(uint32_t value) { _rpc_events_before_wakeup = value; } + void set_rpc_network_threads(uint32_t threads_in) { _rpc_network_threads = threads_in; } + void set_rpc_targets_per_node(uint32_t targets_in) { _rpc_targets_per_node = targets_in; } + void set_skip_communicationmanager_thread(bool value) { _skip_communicationmanager_thread = value; } + void set_skip_get_spi_bucket_info(bool value) { _skip_get_spi_bucket_info = value; } + void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; } + void set_use_document_api(bool value) { _use_document_api = value; } + void set_use_message_bus(bool value) { _use_message_bus = value; } + void set_use_storage_chain(bool value) { _use_storage_chain = value; } + bool check() const; +}; + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp new file mode 100644 index 00000000000..e082f2b96a1 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp @@ -0,0 +1,195 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "bm_feed.h" +#include "bm_range.h" +#include "bucket_selector.h" +#include "pending_tracker.h" +#include "i_bm_feed_handler.h" +#include <vespa/document/base/documentid.h> +#include <vespa/document/bucket/bucketid.h> +#include <vespa/document/datatype/documenttype.h> +#include <vespa/document/fieldset/fieldsets.h> +#include <vespa/document/fieldvalue/document.h> +#include <vespa/document/fieldvalue/intfieldvalue.h> +#include <vespa/document/repo/documenttyperepo.h> +#include <vespa/document/test/make_bucket_space.h> +#include <vespa/document/update/assignvalueupdate.h> +#include <vespa/document/update/documentupdate.h> +#include <vespa/vespalib/util/stringfmt.h> +#include <cassert> + +#include <vespa/log/log.h> +LOG_SETUP(".bmcluster.bm_feed"); + +using document::AssignValueUpdate; +using document::Document; +using document::DocumentId; +using document::DocumentType; +using document::DocumentTypeRepo; +using document::DocumentUpdate; +using document::IntFieldValue; +using document::FieldUpdate; + +namespace search::bmcluster { + +BmFeed::BmFeed(std::shared_ptr<const DocumentTypeRepo> repo) + : _repo(std::move(repo)), + _document_type(_repo->getDocumentType("test")), + _field(_document_type->getField("int")), + _bucket_bits(16), + _bucket_space(document::test::makeBucketSpace("test")) +{ +} + +BmFeed::~BmFeed() +{ +} + +DocumentId +BmFeed::make_document_id(uint32_t n, uint32_t i) const +{ + DocumentId id(vespalib::make_string("id::test:n=%u:%u", n & (num_buckets() - 1), i)); + return id; +} + +std::unique_ptr<Document> +BmFeed::make_document(uint32_t n, uint32_t i) const +{ + auto id = make_document_id(n, i); + auto document = std::make_unique<Document>(*_document_type, id); + document->setRepo(*_repo); + document->setFieldValue(_field, std::make_unique<IntFieldValue>(i)); + return document; +} + +std::unique_ptr<DocumentUpdate> +BmFeed::make_document_update(uint32_t n, uint32_t i) const +{ + auto id = make_document_id(n, i); + auto document_update = 
std::make_unique<DocumentUpdate>(*_repo, *_document_type, id); + document_update->addUpdate(FieldUpdate(_field).addUpdate(AssignValueUpdate(IntFieldValue(15)))); + return document_update; +} + +vespalib::nbostream +BmFeed::make_put_feed(BmRange range, BucketSelector bucket_selector) +{ + vespalib::nbostream serialized_feed; + LOG(debug, "make_put_feed([%u..%u))", range.get_start(), range.get_end()); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + auto n = bucket_selector(i); + serialized_feed << make_bucket_id(n); + auto document = make_document(n, i); + document->serialize(serialized_feed); + } + return serialized_feed; +} + +vespalib::nbostream +BmFeed::make_update_feed(BmRange range, BucketSelector bucket_selector) +{ + vespalib::nbostream serialized_feed; + LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end()); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + auto n = bucket_selector(i); + serialized_feed << make_bucket_id(n); + auto document_update = make_document_update(n, i); + document_update->serializeHEAD(serialized_feed); + } + return serialized_feed; +} + +vespalib::nbostream +BmFeed::make_remove_feed(BmRange range, BucketSelector bucket_selector) +{ + vespalib::nbostream serialized_feed; + LOG(debug, "make_remove_feed([%u..%u))", range.get_start(), range.get_end()); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + auto n = bucket_selector(i); + serialized_feed << make_bucket_id(n); + auto document_id = make_document_id(n, i); + vespalib::string raw_id = document_id.toString(); + serialized_feed.write(raw_id.c_str(), raw_id.size() + 1); + } + return serialized_feed; +} + + +void +BmFeed::put_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) +{ + LOG(debug, "put_async_task([%u..%u))", range.get_start(), range.get_end()); + PendingTracker pending_tracker(max_pending); + 
feed_handler.attach_bucket_info_queue(pending_tracker); + auto &repo = *_repo; + vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); + document::BucketId bucket_id; + bool use_timestamp = !feed_handler.manages_timestamp(); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + is >> bucket_id; + document::Bucket bucket(_bucket_space, bucket_id); + auto document = std::make_unique<Document>(repo, is); + feed_handler.put(bucket, std::move(document), (use_timestamp ? (time_bias + i) : 0), pending_tracker); + } + assert(is.empty()); + pending_tracker.drain(); +} + +void +BmFeed::update_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) +{ + LOG(debug, "update_async_task([%u..%u))", range.get_start(), range.get_end()); + PendingTracker pending_tracker(max_pending); + feed_handler.attach_bucket_info_queue(pending_tracker); + auto &repo = *_repo; + vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); + document::BucketId bucket_id; + bool use_timestamp = !feed_handler.manages_timestamp(); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + is >> bucket_id; + document::Bucket bucket(_bucket_space, bucket_id); + auto document_update = DocumentUpdate::createHEAD(repo, is); + feed_handler.update(bucket, std::move(document_update), (use_timestamp ? 
(time_bias + i) : 0), pending_tracker); + } + assert(is.empty()); + pending_tracker.drain(); +} + +void +BmFeed::get_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed) +{ + LOG(debug, "get_async_task([%u..%u))", range.get_start(), range.get_end()); + search::bmcluster::PendingTracker pending_tracker(max_pending); + vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); + document::BucketId bucket_id; + vespalib::string all_fields(document::AllFields::NAME); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + is >> bucket_id; + document::Bucket bucket(_bucket_space, bucket_id); + DocumentId document_id(is); + feed_handler.get(bucket, all_fields, document_id, pending_tracker); + } + assert(is.empty()); + pending_tracker.drain(); +} + +void +BmFeed::remove_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias) +{ + LOG(debug, "remove_async_task([%u..%u))", range.get_start(), range.get_end()); + search::bmcluster::PendingTracker pending_tracker(max_pending); + feed_handler.attach_bucket_info_queue(pending_tracker); + vespalib::nbostream is(serialized_feed.data(), serialized_feed.size()); + document::BucketId bucket_id; + bool use_timestamp = !feed_handler.manages_timestamp(); + for (unsigned int i = range.get_start(); i < range.get_end(); ++i) { + is >> bucket_id; + document::Bucket bucket(_bucket_space, bucket_id); + DocumentId document_id(is); + feed_handler.remove(bucket, document_id, (use_timestamp ? 
(time_bias + i) : 0), pending_tracker); + } + assert(is.empty()); + pending_tracker.drain(); +} + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h new file mode 100644 index 00000000000..a6afe7b10d9 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h @@ -0,0 +1,57 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <vespa/document/base/documentid.h> +#include <vespa/document/bucket/bucketspace.h> +#include <vespa/document/bucket/bucketid.h> +#include <vespa/document/bucket/bucket.h> + +namespace document { + +class Document; +class DocumentType; +class DocumentTypeRepo; +class DocumentUpdate; +class Field; + +} + +namespace vespalib { class nbostream; } + +namespace search::bmcluster { + +class BmRange; +class BucketSelector; +class IBmFeedHandler; + +/* + * Class to generate synthetic feed of documents. + */ +class BmFeed { + std::shared_ptr<const document::DocumentTypeRepo> _repo; + const document::DocumentType* _document_type; + const document::Field& _field; + uint32_t _bucket_bits; + document::BucketSpace _bucket_space; +public: + + BmFeed(std::shared_ptr<const document::DocumentTypeRepo> document_types); + ~BmFeed(); + uint32_t num_buckets() const { return (1u << _bucket_bits); } + document::BucketSpace get_bucket_space() const noexcept { return _bucket_space; } + document::BucketId make_bucket_id(uint32_t n) const { return document::BucketId(_bucket_bits, n & (num_buckets() - 1)); } + document::Bucket make_bucket(uint32_t n) const { return document::Bucket(_bucket_space, make_bucket_id(n)); } + document::DocumentId make_document_id(uint32_t n, uint32_t i) const; + std::unique_ptr<document::Document> make_document(uint32_t n, uint32_t i) const; + std::unique_ptr<document::DocumentUpdate> make_document_update(uint32_t n, uint32_t i) const; + vespalib::nbostream make_put_feed(BmRange 
range, BucketSelector bucket_selector); + vespalib::nbostream make_update_feed(BmRange range, BucketSelector bucket_selector); + vespalib::nbostream make_remove_feed(BmRange range, BucketSelector bucket_selector); + void put_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias); + void update_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias); + void get_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed); + void remove_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias); +}; + +} diff --git a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.cpp index b608593dada..d947ca5e109 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.cpp @@ -24,7 +24,7 @@ using mbus::SourceSession; using storage::mbusprot::StorageProtocol; using storage::mbusprot::StorageReply; -namespace feedbm { +namespace search::bmcluster { namespace { @@ -111,7 +111,9 @@ BmMessageBus::ReplyHandler::handleReply(std::unique_ptr<Reply> reply) } if (failed) { ++_errors; - LOG(error, "Unexpected %s", reply_as_string(*reply).c_str()); + if (_errors <= 10) { + LOG(error, "Unexpected %s", reply_as_string(*reply).c_str()); + } } tracker->release(); } else { diff --git a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.h b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.h index a9cff1fb826..7829a4e4946 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.h @@ -16,12 +16,12 @@ class SourceSession; } -namespace feedbm { +namespace search::bmcluster { class 
PendingTracker; /* - * Message bus for feed benchmark program. + * Message bus for benchmark cluster. */ class BmMessageBus { diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp new file mode 100644 index 00000000000..84bd921620d --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp @@ -0,0 +1,738 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include "bm_node.h" +#include "bm_cluster.h" +#include "bm_cluster_params.h" +#include "bm_message_bus.h" +#include "bm_storage_chain_builder.h" +#include "bm_storage_link_context.h" +#include "storage_api_chain_bm_feed_handler.h" +#include "storage_api_message_bus_bm_feed_handler.h" +#include "storage_api_rpc_bm_feed_handler.h" +#include "document_api_message_bus_bm_feed_handler.h" +#include "i_bm_feed_handler.h" +#include "spi_bm_feed_handler.h" +#include <vespa/config-attributes.h> +#include <vespa/config-bucketspaces.h> +#include <vespa/config-imported-fields.h> +#include <vespa/config-indexschema.h> +#include <vespa/config-persistence.h> +#include <vespa/config-rank-profiles.h> +#include <vespa/config-slobroks.h> +#include <vespa/config-stor-distribution.h> +#include <vespa/config-stor-filestor.h> +#include <vespa/config-summary.h> +#include <vespa/config-summarymap.h> +#include <vespa/config-upgrading.h> +#include <vespa/config/common/configcontext.h> +#include <vespa/document/bucket/bucketspace.h> +#include <vespa/document/fieldset/fieldsetrepo.h> +#include <vespa/document/repo/configbuilder.h> +#include <vespa/document/repo/document_type_repo_factory.h> +#include <vespa/document/repo/documenttyperepo.h> +#include <vespa/document/test/make_bucket_space.h> +#include <vespa/messagebus/config-messagebus.h> +#include <vespa/messagebus/testlib/slobrok.h> +#include <vespa/metrics/config-metricsmanager.h> +#include <vespa/searchcommon/common/schemaconfigurer.h> +#include 
<vespa/searchcore/proton/common/alloc_config.h> +#include <vespa/searchcore/proton/matching/querylimiter.h> +#include <vespa/searchcore/proton/metrics/metricswireservice.h> +#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h> +#include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h> +#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h> +#include <vespa/searchcore/proton/server/bootstrapconfig.h> +#include <vespa/searchcore/proton/server/documentdb.h> +#include <vespa/searchcore/proton/server/document_db_maintenance_config.h> +#include <vespa/searchcore/proton/server/documentdbconfigmanager.h> +#include <vespa/searchcore/proton/server/fileconfigmanager.h> +#include <vespa/searchcore/proton/server/memoryconfigstore.h> +#include <vespa/searchcore/proton/server/persistencehandlerproxy.h> +#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h> +#include <vespa/searchlib/index/dummyfileheadercontext.h> +#include <vespa/searchlib/transactionlog/translogserver.h> +#include <vespa/searchsummary/config/config-juniperrc.h> +#include <vespa/storage/bucketdb/config-stor-bucket-init.h> +#include <vespa/storage/common/i_storage_chain_builder.h> +#include <vespa/storage/config/config-stor-bouncer.h> +#include <vespa/storage/config/config-stor-communicationmanager.h> +#include <vespa/storage/config/config-stor-distributormanager.h> +#include <vespa/storage/config/config-stor-opslogger.h> +#include <vespa/storage/config/config-stor-prioritymapping.h> +#include <vespa/storage/config/config-stor-server.h> +#include <vespa/storage/config/config-stor-status.h> +#include <vespa/storage/config/config-stor-visitordispatcher.h> +#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h> +#include <vespa/storage/visiting/config-stor-visitor.h> +#include <vespa/storageserver/app/distributorprocess.h> +#include <vespa/storageserver/app/servicelayerprocess.h> +#include <vespa/vespalib/io/fileutil.h> +#include 
<vespa/vespalib/stllike/asciistream.h> +#include <vespa/vespalib/util/size_literals.h> +#include <tests/proton/common/dummydbowner.h> + +#include <vespa/log/log.h> +LOG_SETUP(".bmcluster.bm_node"); + +using cloud::config::SlobroksConfigBuilder; +using cloud::config::filedistribution::FiledistributorrpcConfig; +using config::ConfigSet; +using document::BucketSpace; +using document::DocumenttypesConfig; +using document::DocumenttypesConfigBuilder; +using document::DocumentType; +using document::DocumentTypeRepo; +using document::Field; +using messagebus::MessagebusConfigBuilder; +using metrics::MetricsmanagerConfigBuilder; +using proton::BootstrapConfig; +using proton::DocTypeName; +using proton::DocumentDB; +using proton::DocumentDBConfig; +using proton::HwInfo; +using search::index::Schema; +using search::index::SchemaBuilder; +using search::transactionlog::TransLogServer; +using storage::rpc::SharedRpcResources; +using storage::rpc::StorageApiRpcService; +using storage::spi::PersistenceProvider; +using vespa::config::content::PersistenceConfigBuilder; +using vespa::config::content::StorDistributionConfigBuilder; +using vespa::config::content::StorFilestorConfigBuilder; +using vespa::config::content::UpgradingConfigBuilder; +using vespa::config::content::core::BucketspacesConfig; +using vespa::config::content::core::BucketspacesConfigBuilder; +using vespa::config::content::core::StorBouncerConfigBuilder; +using vespa::config::content::core::StorBucketInitConfigBuilder; +using vespa::config::content::core::StorCommunicationmanagerConfigBuilder; +using vespa::config::content::core::StorDistributormanagerConfigBuilder; +using vespa::config::content::core::StorOpsloggerConfigBuilder; +using vespa::config::content::core::StorPrioritymappingConfigBuilder; +using vespa::config::content::core::StorServerConfigBuilder; +using vespa::config::content::core::StorStatusConfigBuilder; +using vespa::config::content::core::StorVisitorConfigBuilder; +using 
vespa::config::content::core::StorVisitordispatcherConfigBuilder; +using vespa::config::search::AttributesConfig; +using vespa::config::search::AttributesConfigBuilder; +using vespa::config::search::ImportedFieldsConfig; +using vespa::config::search::IndexschemaConfig; +using vespa::config::search::RankProfilesConfig; +using vespa::config::search::SummaryConfig; +using vespa::config::search::SummarymapConfig; +using vespa::config::search::core::ProtonConfig; +using vespa::config::search::core::ProtonConfigBuilder; +using vespa::config::search::summary::JuniperrcConfig; +using vespalib::compression::CompressionConfig; + +namespace search::bmcluster { + +namespace { + +enum PortBias +{ + TLS_LISTEN_PORT, + SERVICE_LAYER_MBUS_PORT, + SERVICE_LAYER_RPC_PORT, + SERVICE_LAYER_STATUS_PORT, + DISTRIBUTOR_MBUS_PORT, + DISTRIBUTOR_RPC_PORT, + DISTRIBUTOR_STATUS_PORT, + NUM_PORTS, + +}; + +int port_number(int base_port, PortBias bias) +{ + return base_port + static_cast<int>(bias); +} + +storage::spi::Context context(storage::spi::Priority(0), 0); + +} + +std::shared_ptr<AttributesConfig> make_attributes_config() { + AttributesConfigBuilder builder; + AttributesConfig::Attribute attribute; + attribute.name = "int"; + attribute.datatype = AttributesConfig::Attribute::Datatype::INT32; + builder.attribute.emplace_back(attribute); + return std::make_shared<AttributesConfig>(builder); +} + +std::shared_ptr<DocumentDBConfig> make_document_db_config(std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const DocumentTypeRepo> repo, const DocTypeName& doc_type_name) +{ + auto indexschema = std::make_shared<IndexschemaConfig>(); + auto attributes = make_attributes_config(); + auto summary = std::make_shared<SummaryConfig>(); + std::shared_ptr<Schema> schema(new Schema()); + SchemaBuilder::build(*indexschema, *schema); + SchemaBuilder::build(*attributes, *schema); + SchemaBuilder::build(*summary, *schema); + return std::make_shared<DocumentDBConfig>( + 1, + 
std::make_shared<RankProfilesConfig>(), + std::make_shared<proton::matching::RankingConstants>(), + std::make_shared<proton::matching::RankingExpressions>(), + std::make_shared<proton::matching::OnnxModels>(), + indexschema, + attributes, + summary, + std::make_shared<SummarymapConfig>(), + std::make_shared<JuniperrcConfig>(), + document_types, + repo, + std::make_shared<ImportedFieldsConfig>(), + std::make_shared<TuneFileDocumentDB>(), + schema, + std::make_shared<proton::DocumentDBMaintenanceConfig>(), + search::LogDocumentStore::Config(), + std::make_shared<const proton::ThreadingServiceConfig>(proton::ThreadingServiceConfig::make(1)), + std::make_shared<const proton::AllocConfig>(), + "client", + doc_type_name.getName()); +} + +void +make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port) +{ + SlobroksConfigBuilder::Slobrok slobrok; + slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port); + slobroks.slobrok.push_back(std::move(slobrok)); +} + +void +make_bucketspaces_config(BucketspacesConfigBuilder& bucketspaces) +{ + BucketspacesConfigBuilder::Documenttype bucket_space_map; + bucket_space_map.name = "test"; + bucket_space_map.bucketspace = "default"; + bucketspaces.documenttype.emplace_back(std::move(bucket_space_map)); +} + +class MyPersistenceEngineOwner : public proton::IPersistenceEngineOwner +{ + void setClusterState(BucketSpace, const storage::spi::ClusterState&) override { } +}; + +struct MyResourceWriteFilter : public proton::IResourceWriteFilter +{ + bool acceptWriteOperation() const override { return true; } + State getAcceptState() const override { return IResourceWriteFilter::State(); } +}; + +class MyServiceLayerProcess : public storage::ServiceLayerProcess { + PersistenceProvider& _provider; + +public: + MyServiceLayerProcess(const config::ConfigUri& configUri, + PersistenceProvider& provider, + std::unique_ptr<storage::IStorageChainBuilder> chain_builder); + ~MyServiceLayerProcess() override { 
shutdown(); } + + void shutdown() override; + void setupProvider() override; + PersistenceProvider& getProvider() override; +}; + +MyServiceLayerProcess::MyServiceLayerProcess(const config::ConfigUri& configUri, + PersistenceProvider& provider, + std::unique_ptr<storage::IStorageChainBuilder> chain_builder) + : ServiceLayerProcess(configUri), + _provider(provider) +{ + if (chain_builder) { + set_storage_chain_builder(std::move(chain_builder)); + } +} + +void +MyServiceLayerProcess::shutdown() +{ + ServiceLayerProcess::shutdown(); +} + +void +MyServiceLayerProcess::setupProvider() +{ +} + +PersistenceProvider& +MyServiceLayerProcess::getProvider() +{ + return _provider; +} + +struct StorageConfigSet +{ + vespalib::string config_id; + DocumenttypesConfigBuilder documenttypes; + StorDistributionConfigBuilder stor_distribution; + StorBouncerConfigBuilder stor_bouncer; + StorCommunicationmanagerConfigBuilder stor_communicationmanager; + StorOpsloggerConfigBuilder stor_opslogger; + StorPrioritymappingConfigBuilder stor_prioritymapping; + UpgradingConfigBuilder upgrading; + StorServerConfigBuilder stor_server; + StorStatusConfigBuilder stor_status; + BucketspacesConfigBuilder bucketspaces; + MetricsmanagerConfigBuilder metricsmanager; + SlobroksConfigBuilder slobroks; + MessagebusConfigBuilder messagebus; + + StorageConfigSet(const vespalib::string &base_dir, unsigned int node_idx, bool distributor, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, + int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params) + : config_id(config_id_in), + documenttypes(documenttypes_in), + stor_distribution(), + stor_bouncer(), + stor_communicationmanager(), + stor_opslogger(), + stor_prioritymapping(), + upgrading(), + stor_server(), + stor_status(), + bucketspaces(), + metricsmanager(), + slobroks(), + messagebus() + { + { + auto& dc = stor_distribution; + { + StorDistributionConfigBuilder::Group group; + { + for 
(unsigned int i = 0; i < params.get_num_nodes(); ++i) { + StorDistributionConfigBuilder::Group::Nodes node; + node.index = i; + group.nodes.push_back(std::move(node)); + } + } + group.index = "invalid"; + group.name = "invalid"; + group.capacity = 1.0; + group.partitions = ""; + dc.group.push_back(std::move(group)); + } + dc.redundancy = 1; + dc.readyCopies = 1; + } + stor_server.nodeIndex = node_idx; + stor_server.isDistributor = distributor; + stor_server.contentNodeBucketDbStripeBits = params.get_bucket_db_stripe_bits(); + if (distributor) { + stor_server.rootFolder = base_dir + "/distributor"; + } else { + stor_server.rootFolder = base_dir + "/storage"; + } + make_slobroks_config(slobroks, slobrok_port); + stor_communicationmanager.rpc.numNetworkThreads = params.get_rpc_network_threads(); + stor_communicationmanager.rpc.eventsBeforeWakeup = params.get_rpc_events_before_wakeup(); + stor_communicationmanager.rpc.numTargetsPerNode = params.get_rpc_targets_per_node(); + stor_communicationmanager.mbusport = mbus_port; + stor_communicationmanager.rpcport = rpc_port; + stor_communicationmanager.skipThread = params.get_skip_communicationmanager_thread(); + + stor_status.httpport = status_port; + make_bucketspaces_config(bucketspaces); + } + + ~StorageConfigSet(); + + void add_builders(ConfigSet& set) { + set.addBuilder(config_id, &documenttypes); + set.addBuilder(config_id, &stor_distribution); + set.addBuilder(config_id, &stor_bouncer); + set.addBuilder(config_id, &stor_communicationmanager); + set.addBuilder(config_id, &stor_opslogger); + set.addBuilder(config_id, &stor_prioritymapping); + set.addBuilder(config_id, &upgrading); + set.addBuilder(config_id, &stor_server); + set.addBuilder(config_id, &stor_status); + set.addBuilder(config_id, &bucketspaces); + set.addBuilder(config_id, &metricsmanager); + set.addBuilder(config_id, &slobroks); + set.addBuilder(config_id, &messagebus); + } +}; + +StorageConfigSet::~StorageConfigSet() = default; + +struct 
ServiceLayerConfigSet : public StorageConfigSet +{ + PersistenceConfigBuilder persistence; + StorFilestorConfigBuilder stor_filestor; + StorBucketInitConfigBuilder stor_bucket_init; + StorVisitorConfigBuilder stor_visitor; + + ServiceLayerConfigSet(const vespalib::string& base_dir, unsigned int node_idx, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, + int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params) + : StorageConfigSet(base_dir, node_idx, false, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params), + persistence(), + stor_filestor(), + stor_bucket_init(), + stor_visitor() + { + stor_filestor.numResponseThreads = params.get_response_threads(); + stor_filestor.numNetworkThreads = params.get_rpc_network_threads(); + stor_filestor.useAsyncMessageHandlingOnSchedule = params.get_use_async_message_handling_on_schedule(); + } + + ~ServiceLayerConfigSet(); + + void add_builders(ConfigSet& set) { + StorageConfigSet::add_builders(set); + set.addBuilder(config_id, &persistence); + set.addBuilder(config_id, &stor_filestor); + set.addBuilder(config_id, &stor_bucket_init); + set.addBuilder(config_id, &stor_visitor); + } +}; + +ServiceLayerConfigSet::~ServiceLayerConfigSet() = default; + +struct DistributorConfigSet : public StorageConfigSet +{ + StorDistributormanagerConfigBuilder stor_distributormanager; + StorVisitordispatcherConfigBuilder stor_visitordispatcher; + + DistributorConfigSet(const vespalib::string& base_dir, unsigned int node_idx, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in, + int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params) + : StorageConfigSet(base_dir, node_idx, true, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params), + stor_distributormanager(), + stor_visitordispatcher() + { + stor_distributormanager.numDistributorStripes = 
params.get_distributor_stripes(); + } + + ~DistributorConfigSet(); + + void add_builders(ConfigSet& set) { + StorageConfigSet::add_builders(set); + set.addBuilder(config_id, &stor_distributormanager); + set.addBuilder(config_id, &stor_visitordispatcher); + } +}; + +DistributorConfigSet::~DistributorConfigSet() = default; + +BmNode::BmNode() = default; + +BmNode::~BmNode() = default; + +class MyBmNode : public BmNode +{ + BmCluster& _cluster; + std::shared_ptr<DocumenttypesConfig> _document_types; + std::shared_ptr<const DocumentTypeRepo> _repo; + proton::DocTypeName _doc_type_name; + std::shared_ptr<DocumentDBConfig> _document_db_config; + vespalib::string _base_dir; + search::index::DummyFileHeaderContext _file_header_context; + unsigned int _node_idx; + int _tls_listen_port; + int _slobrok_port; + int _service_layer_mbus_port; + int _service_layer_rpc_port; + int _service_layer_status_port; + int _distributor_mbus_port; + int _distributor_rpc_port; + int _distributor_status_port; + TransLogServer _tls; + vespalib::string _tls_spec; + proton::matching::QueryLimiter _query_limiter; + vespalib::Clock _clock; + proton::DummyWireService _metrics_wire_service; + proton::MemoryConfigStores _config_stores; + vespalib::ThreadStackExecutor _summary_executor; + proton::DummyDBOwner _document_db_owner; + BucketSpace _bucket_space; + std::shared_ptr<DocumentDB> _document_db; + MyPersistenceEngineOwner _persistence_owner; + MyResourceWriteFilter _write_filter; + proton::test::DiskMemUsageNotifier _disk_mem_usage_notifier; + std::shared_ptr<proton::PersistenceEngine> _persistence_engine; + std::unique_ptr<const document::FieldSetRepo> _field_set_repo; + ServiceLayerConfigSet _service_layer_config; + DistributorConfigSet _distributor_config; + ConfigSet _config_set; + std::shared_ptr<config::IConfigContext> _config_context; + std::unique_ptr<IBmFeedHandler> _feed_handler; + std::unique_ptr<mbus::Slobrok> _slobrok; + std::shared_ptr<BmStorageLinkContext> 
_service_layer_chain_context; + std::unique_ptr<MyServiceLayerProcess> _service_layer; + std::shared_ptr<BmStorageLinkContext> _distributor_chain_context; + std::unique_ptr<storage::DistributorProcess> _distributor; + + void create_document_db(const BmClusterParams& params); +public: + MyBmNode(const vespalib::string &base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port); + ~MyBmNode() override; + void initialize_persistence_provider() override; + void create_bucket(const document::Bucket& bucket) override; + void start_service_layer(const BmClusterParams& params) override; + void wait_service_layer() override; + void start_distributor(const BmClusterParams& params) override; + void create_feed_handler(const BmClusterParams& params) override; + void shutdown_feed_handler() override; + void shutdown_distributor() override; + void shutdown_service_layer() override; + void wait_service_layer_slobrok() override; + void wait_distributor_slobrok() override; + IBmFeedHandler* get_feed_handler() override; + PersistenceProvider* get_persistence_provider() override; +}; + +MyBmNode::MyBmNode(const vespalib::string& base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port) + : BmNode(), + _cluster(cluster), + _document_types(std::move(document_types)), + _repo(document::DocumentTypeRepoFactory::make(*_document_types)), + _doc_type_name("test"), + _document_db_config(make_document_db_config(_document_types, _repo, _doc_type_name)), + _base_dir(base_dir), + _file_header_context(), + _node_idx(node_idx), + _tls_listen_port(port_number(base_port, PortBias::TLS_LISTEN_PORT)), + _slobrok_port(slobrok_port), + _service_layer_mbus_port(port_number(base_port, PortBias::SERVICE_LAYER_MBUS_PORT)), + 
_service_layer_rpc_port(port_number(base_port, PortBias::SERVICE_LAYER_RPC_PORT)), + _service_layer_status_port(port_number(base_port, PortBias::SERVICE_LAYER_STATUS_PORT)), + _distributor_mbus_port(port_number(base_port, PortBias::DISTRIBUTOR_MBUS_PORT)), + _distributor_rpc_port(port_number(base_port, PortBias::DISTRIBUTOR_RPC_PORT)), + _distributor_status_port(port_number(base_port, PortBias::DISTRIBUTOR_STATUS_PORT)), + _tls("tls", _tls_listen_port, _base_dir, _file_header_context), + _tls_spec(vespalib::make_string("tcp/localhost:%d", _tls_listen_port)), + _query_limiter(), + _clock(), + _metrics_wire_service(), + _config_stores(), + _summary_executor(8, 128_Ki), + _document_db_owner(), + _bucket_space(document::test::makeBucketSpace(_doc_type_name.getName())), + _document_db(), + _persistence_owner(), + _write_filter(), + _disk_mem_usage_notifier(), + _persistence_engine(), + _field_set_repo(std::make_unique<const document::FieldSetRepo>(*_repo)), + _service_layer_config(_base_dir, _node_idx, "bm-servicelayer", *_document_types, _slobrok_port, _service_layer_mbus_port, _service_layer_rpc_port, _service_layer_status_port, params), + _distributor_config(_base_dir, _node_idx, "bm-distributor", *_document_types, _slobrok_port, _distributor_mbus_port, _distributor_rpc_port, _distributor_status_port, params), + _config_set(), + _config_context(std::make_shared<config::ConfigContext>(_config_set)), + _feed_handler(), + _slobrok(), + _service_layer_chain_context(), + _service_layer(), + _distributor_chain_context(), + _distributor() +{ + create_document_db(params); + _persistence_engine = std::make_unique<proton::PersistenceEngine>(_persistence_owner, _write_filter, _disk_mem_usage_notifier, -1, false); + auto proxy = std::make_shared<proton::PersistenceHandlerProxy>(_document_db); + _persistence_engine->putHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name, proxy); + _service_layer_config.add_builders(_config_set); + 
_distributor_config.add_builders(_config_set); + _feed_handler = std::make_unique<SpiBmFeedHandler>(*_persistence_engine, *_field_set_repo, params.get_skip_get_spi_bucket_info()); +} + +MyBmNode::~MyBmNode() +{ + if (_persistence_engine) { + _persistence_engine->destroyIterators(); + _persistence_engine->removeHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name); + } + if (_document_db) { + _document_db->close(); + } +} + +void +MyBmNode::create_document_db(const BmClusterParams& params) +{ + vespalib::mkdir(_base_dir, false); + vespalib::mkdir(_base_dir + "/" + _doc_type_name.getName(), false); + vespalib::string input_cfg = _base_dir + "/" + _doc_type_name.getName() + "/baseconfig"; + { + proton::FileConfigManager fileCfg(input_cfg, "", _doc_type_name.getName()); + fileCfg.saveConfig(*_document_db_config, 1); + } + config::DirSpec spec(input_cfg + "/config-1"); + auto tuneFileDocDB = std::make_shared<TuneFileDocumentDB>(); + proton::DocumentDBConfigHelper mgr(spec, _doc_type_name.getName()); + auto protonCfg = std::make_shared<ProtonConfigBuilder>(); + if ( ! 
params.get_indexing_sequencer().empty()) { + vespalib::string sequencer = params.get_indexing_sequencer(); + std::transform(sequencer.begin(), sequencer.end(), sequencer.begin(), [](unsigned char c){ return std::toupper(c); }); + protonCfg->indexing.optimize = ProtonConfig::Indexing::getOptimize(sequencer); + } + auto bootstrap_config = std::make_shared<BootstrapConfig>(1, + _document_types, + _repo, + std::move(protonCfg), + std::make_shared<FiledistributorrpcConfig>(), + std::make_shared<BucketspacesConfig>(), + tuneFileDocDB, HwInfo()); + mgr.forwardConfig(bootstrap_config); + mgr.nextGeneration(0ms); + _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name, + _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner, + _summary_executor, _summary_executor, *_persistence_engine, _tls, + _metrics_wire_service, _file_header_context, + _config_stores.getConfigStore(_doc_type_name.toString()), + std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo()); + _document_db->start(); + _document_db->waitForOnlineState(); +} + +void +MyBmNode::initialize_persistence_provider() +{ + get_persistence_provider()->initialize(); +} + +void +MyBmNode::create_bucket(const document::Bucket& bucket) +{ + get_persistence_provider()->createBucket(storage::spi::Bucket(bucket), context); +} + +void +MyBmNode::start_service_layer(const BmClusterParams& params) +{ + config::ConfigUri config_uri("bm-servicelayer", _config_context); + std::unique_ptr<BmStorageChainBuilder> chain_builder; + if (params.get_use_storage_chain() && !params.needs_distributor()) { + chain_builder = std::make_unique<BmStorageChainBuilder>(); + _service_layer_chain_context = chain_builder->get_context(); + } + _service_layer = std::make_unique<MyServiceLayerProcess>(config_uri, + *_persistence_engine, + std::move(chain_builder)); + _service_layer->setupConfig(100ms); + _service_layer->createNode(); +} + +void 
+MyBmNode::wait_service_layer() +{ + _service_layer->getNode().waitUntilInitialized(); +} + +void +MyBmNode::start_distributor(const BmClusterParams& params) +{ + config::ConfigUri config_uri("bm-distributor", _config_context); + std::unique_ptr<BmStorageChainBuilder> chain_builder; + if (params.get_use_storage_chain() && !params.get_use_document_api()) { + chain_builder = std::make_unique<BmStorageChainBuilder>(); + _distributor_chain_context = chain_builder->get_context(); + } + _distributor = std::make_unique<storage::DistributorProcess>(config_uri); + if (chain_builder) { + _distributor->set_storage_chain_builder(std::move(chain_builder)); + } + _distributor->setupConfig(100ms); + _distributor->createNode(); +} + +void +MyBmNode::create_feed_handler(const BmClusterParams& params) +{ + StorageApiRpcService::Params rpc_params; + // This is the same compression config as the default in stor-communicationmanager.def. + rpc_params.compression_config = CompressionConfig(CompressionConfig::Type::LZ4, 3, 90, 1024); + rpc_params.num_rpc_targets_per_node = params.get_rpc_targets_per_node(); + if (params.get_use_document_api()) { + _feed_handler = std::make_unique<DocumentApiMessageBusBmFeedHandler>(_cluster.get_message_bus()); + } else if (params.get_enable_distributor()) { + if (params.get_use_storage_chain()) { + assert(_distributor_chain_context); + _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_distributor_chain_context, true); + } else if (params.get_use_message_bus()) { + _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(_cluster.get_message_bus(), true); + } else { + _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(_cluster.get_rpc_client(), _repo, rpc_params, true); + } + } else if (params.needs_service_layer()) { + if (params.get_use_storage_chain()) { + assert(_service_layer_chain_context); + _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_service_layer_chain_context, false); + } else if 
(params.get_use_message_bus()) { + _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(_cluster.get_message_bus(), false); + } else { + _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(_cluster.get_rpc_client(), _repo, rpc_params, false); + } + } +} + +void +MyBmNode::shutdown_feed_handler() +{ + _feed_handler.reset(); +} + +void +MyBmNode::shutdown_distributor() +{ + if (_distributor) { + LOG(info, "stop distributor"); + _distributor->getNode().requestShutdown("controlled shutdown"); + _distributor->shutdown(); + } +} + +void +MyBmNode::shutdown_service_layer() +{ + if (_service_layer) { + LOG(info, "stop service layer"); + _service_layer->getNode().requestShutdown("controlled shutdown"); + _service_layer->shutdown(); + } +} + +IBmFeedHandler* +MyBmNode::get_feed_handler() +{ + return _feed_handler.get(); +} + +PersistenceProvider* +MyBmNode::get_persistence_provider() +{ + return _persistence_engine.get(); +} + +void +MyBmNode::wait_service_layer_slobrok() +{ + vespalib::asciistream s; + s << "storage/cluster.storage/storage/" << _node_idx; + _cluster.wait_slobrok(s.str()); + s << "/default"; + _cluster.wait_slobrok(s.str()); +} + +void +MyBmNode::wait_distributor_slobrok() +{ + vespalib::asciistream s; + s << "storage/cluster.storage/distributor/" << _node_idx; + _cluster.wait_slobrok(s.str()); + s << "/default"; + _cluster.wait_slobrok(s.str()); +} + +unsigned int +BmNode::num_ports() +{ + return static_cast<unsigned int>(PortBias::NUM_PORTS); +} + +std::unique_ptr<BmNode> +BmNode::create(const vespalib::string& base_dir, int base_port, unsigned int node_idx, BmCluster &cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port) +{ + return std::make_unique<MyBmNode>(base_dir, base_port, node_idx, cluster, params, std::move(document_types), slobrok_port); +} + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.h 
b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h new file mode 100644 index 00000000000..3647981f58b --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h @@ -0,0 +1,53 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <memory> +#include <vespa/searchcore/proton/common/doctypename.h> + +namespace document { + +class Bucket; +class DocumentTypeRepo; +class DocumentType; +class Field; + +}; + +namespace document::internal { class InternalDocumenttypesType; } + +namespace storage::spi { struct PersistenceProvider; } + +namespace search::bmcluster { + +class BmCluster; +class BmClusterParams; +class IBmFeedHandler; + +/* + * Class representing a single benchmark node in a benchmark cluster. + */ +class BmNode { +protected: + + BmNode(); +public: + virtual ~BmNode(); + virtual void initialize_persistence_provider() = 0; + virtual void create_bucket(const document::Bucket& bucket) = 0; + virtual void start_service_layer(const BmClusterParams& params) = 0; + virtual void wait_service_layer() = 0; + virtual void start_distributor(const BmClusterParams& params) = 0; + virtual void create_feed_handler(const BmClusterParams& params) = 0; + virtual void shutdown_feed_handler() = 0; + virtual void shutdown_distributor() = 0; + virtual void shutdown_service_layer() = 0; + virtual void wait_service_layer_slobrok() = 0; + virtual void wait_distributor_slobrok() = 0; + virtual IBmFeedHandler* get_feed_handler() = 0; + virtual storage::spi::PersistenceProvider *get_persistence_provider() = 0; + static unsigned int num_ports(); + static std::unique_ptr<BmNode> create(const vespalib::string &base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<const document::internal::InternalDocumenttypesType> document_types, int slobrok_port); +}; + +} diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_range.h 
b/searchcore/src/vespa/searchcore/bmcluster/bm_range.h new file mode 100644 index 00000000000..fda2b1c52b6 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_range.h @@ -0,0 +1,24 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace search::bmcluster { + +/* + * Range of document "keys" used to generate documents + */ +class BmRange +{ + uint32_t _start; + uint32_t _end; +public: + BmRange(uint32_t start_in, uint32_t end_in) + : _start(start_in), + _end(end_in) + { + } + uint32_t get_start() const { return _start; } + uint32_t get_end() const { return _end; } +}; + +} diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.cpp index bbe0de70ce2..16883e1cc48 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.cpp @@ -7,7 +7,7 @@ #include <vespa/log/log.h> LOG_SETUP(".bm_storage_chain_builder"); -namespace feedbm { +namespace search::bmcluster { BmStorageChainBuilder::BmStorageChainBuilder() : storage::StorageChainBuilder(), diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.h index bba933da9e0..c61cb200c36 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.h @@ -4,7 +4,7 @@ #include <vespa/storage/common/storage_chain_builder.h> -namespace feedbm { +namespace search::bmcluster { struct BmStorageLinkContext; diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.cpp index 2aeda91c30c..c251c25b15d 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.cpp @@ 
-3,7 +3,7 @@ #include "bm_storage_link.h" #include "pending_tracker.h" -namespace feedbm { +namespace search::bmcluster { BmStorageLink::BmStorageLink() diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.h index 95528d7b2d9..8c98479a38b 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.h @@ -6,7 +6,7 @@ #include "pending_tracker_hash.h" #include <vespa/storage/common/storagelink.h> -namespace feedbm { +namespace search::bmcluster { class PendingTracker; diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link_context.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link_context.h index f2df20f1f66..f7cc1841770 100644 --- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link_context.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link_context.h @@ -1,6 +1,6 @@ // Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-namespace feedbm { +namespace search::bmcluster { class BmStorageLink; diff --git a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.cpp b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.cpp index fc43402d68e..6670707ed39 100644 --- a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.cpp @@ -3,7 +3,7 @@ #include "bucket_info_queue.h" #include <vespa/persistence/spi/persistenceprovider.h> -namespace feedbm { +namespace search::bmcluster { BucketInfoQueue::BucketInfoQueue(storage::spi::PersistenceProvider& provider, std::atomic<uint32_t>& errors) : _mutex(), diff --git a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.h b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.h index 07a55127234..1a48f9fa478 100644 --- a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.h +++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.h @@ -9,7 +9,7 @@ namespace storage::spi { struct PersistenceProvider; } -namespace feedbm { +namespace search::bmcluster { /* * Class containing a queue of buckets where mutating feed operations diff --git a/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h b/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h new file mode 100644 index 00000000000..9549bf71401 --- /dev/null +++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h @@ -0,0 +1,28 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace search::bmcluster { + +/* + * Map from document index to bucket to ensure even spread between buckets + * while ensuring that each bucket used belong to a specific thread. 
+ */ +class BucketSelector +{ + uint32_t _thread_id; + uint32_t _threads; + uint32_t _num_buckets; +public: + BucketSelector(uint32_t thread_id_in, uint32_t threads_in, uint32_t num_buckets_in) + : _thread_id(thread_id_in), + _threads(threads_in), + _num_buckets((num_buckets_in / _threads) * _threads) + { + } + uint64_t operator()(uint32_t i) const { + return (static_cast<uint64_t>(i) * _threads + _thread_id) % _num_buckets; + } +}; + +} diff --git a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.cpp index 38c8490de69..c6f2626f27c 100644 --- a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.cpp @@ -17,7 +17,7 @@ using document::DocumentUpdate; using storage::api::StorageMessageAddress; using storage::lib::NodeType; -namespace feedbm { +namespace search::bmcluster { namespace { vespalib::string _Storage("storage"); diff --git a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.h index c71bb113c5b..5358e0a948b 100644 --- a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.h @@ -9,7 +9,7 @@ namespace document { class DocumentTypeRepo; } namespace documentapi { class DocumentMessage; }; namespace storage::api { class StorageMessageAddress; } -namespace feedbm { +namespace search::bmcluster { class BmMessageBus; diff --git a/searchcore/src/apps/vespa-feed-bm/i_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/i_bm_feed_handler.h index 26cbf27b455..fc3953c49a5 100644 --- a/searchcore/src/apps/vespa-feed-bm/i_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/i_bm_feed_handler.h @@ -12,7 
+12,7 @@ class DocumentUpdate; class DocumentId; } -namespace feedbm { +namespace search::bmcluster { class BucketInfoQueue; class PendingTracker; diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker.cpp b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.cpp index 94bed4cb3bd..247bf8bece3 100644 --- a/searchcore/src/apps/vespa-feed-bm/pending_tracker.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.cpp @@ -7,7 +7,7 @@ using namespace std::chrono_literals; -namespace feedbm { +namespace search::bmcluster { PendingTracker::PendingTracker(uint32_t limit) : _pending(0u), diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker.h b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.h index 4ca84ab7442..a8fa2f77396 100644 --- a/searchcore/src/apps/vespa-feed-bm/pending_tracker.h +++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.h @@ -7,7 +7,7 @@ namespace storage::spi { struct PersistenceProvider; } -namespace feedbm { +namespace search::bmcluster { class BucketInfoQueue; diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.cpp b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.cpp index 6863d35703e..515f7f6b2de 100644 --- a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.cpp @@ -5,7 +5,7 @@ #include <vespa/vespalib/stllike/hash_map.hpp> #include <cassert> -namespace feedbm { +namespace search::bmcluster { PendingTrackerHash::PendingTrackerHash() : _mutex(), diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.h b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.h index 89be93fd4ed..de9b6f63aa4 100644 --- a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.h +++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.h @@ -5,7 +5,7 @@ #include <vespa/vespalib/stllike/hash_map.h> #include <mutex> -namespace feedbm { +namespace search::bmcluster { class 
PendingTracker; diff --git a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.cpp index 11149eecb3f..e905b493cf4 100644 --- a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.cpp @@ -15,7 +15,7 @@ using storage::spi::Bucket; using storage::spi::PersistenceProvider; using storage::spi::Timestamp; -namespace feedbm { +namespace search::bmcluster { namespace { @@ -124,12 +124,6 @@ SpiBmFeedHandler::get(const document::Bucket& bucket, vespalib::stringref field_ } void -SpiBmFeedHandler::create_bucket(const document::Bucket& bucket) -{ - _provider.createBucket(Bucket(bucket), context); -} - -void SpiBmFeedHandler::attach_bucket_info_queue(PendingTracker& tracker) { if (!_skip_get_spi_bucket_info) { diff --git a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.h index a78aa06628b..bbc9e3b8e74 100644 --- a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.h @@ -8,7 +8,7 @@ namespace document { class FieldSetRepo; } namespace storage::spi { struct PersistenceProvider; } -namespace feedbm { +namespace search::bmcluster { /* * Benchmark feed handler for feed directly to persistence provider @@ -27,7 +27,6 @@ public: void update(const document::Bucket& bucket, std::unique_ptr<document::DocumentUpdate> document_update, uint64_t timestamp, PendingTracker& tracker) override; void remove(const document::Bucket& bucket, const document::DocumentId& document_id, uint64_t timestamp, PendingTracker& tracker) override; void get(const document::Bucket& bucket, vespalib::stringref field_set_string, const document::DocumentId& document_id, PendingTracker& tracker) override; - void create_bucket(const document::Bucket& bucket); void attach_bucket_info_queue(PendingTracker &tracker) override; uint32_t 
get_error_count() const override; const vespalib::string &get_name() const override; diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.cpp index 82cf2df065f..34669b8cbdc 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.cpp @@ -18,7 +18,7 @@ using document::Document; using document::DocumentId; using document::DocumentUpdate; -namespace feedbm { +namespace search::bmcluster { namespace { diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.h index 0c4b715122e..1c196d746eb 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.h @@ -6,7 +6,7 @@ namespace storage::api { class StorageCommand; } -namespace feedbm { +namespace search::bmcluster { struct BmStorageLinkContext; diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.cpp index f63a8e33cc0..04561b5d93e 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.cpp @@ -15,7 +15,7 @@ using document::DocumentUpdate; using storage::api::StorageMessageAddress; using storage::lib::NodeType; -namespace feedbm { +namespace search::bmcluster { namespace { vespalib::string _Storage("storage"); diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.h index 2aafd0c6830..0027f260b8f 100644 --- 
a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.h @@ -12,7 +12,7 @@ class StorageCommand; class StorageMessageAddress; } -namespace feedbm { +namespace search::bmcluster { class BmMessageBus; diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.cpp index 04d49bba0a3..3e0426cb308 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.cpp @@ -22,7 +22,7 @@ using storage::rpc::SharedRpcResources; using storage::rpc::StorageApiRpcService; using storage::lib::NodeType; -namespace feedbm { +namespace search::bmcluster { namespace { vespalib::string _Storage("storage"); diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.h index 5057d8889a5..360f702e590 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.h +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.h @@ -16,7 +16,7 @@ class MessageCodecProvider; class SharedRpcResources; } -namespace feedbm { +namespace search::bmcluster { /* * Benchmark feed handler for feed to service layer or distributor diff --git a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.cpp index 260b0c8a7af..ec1ebec2954 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.cpp +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.cpp @@ -6,7 +6,7 @@ #include <vespa/log/log.h> LOG_SETUP(".storage_reply_error_checker"); -namespace feedbm { +namespace search::bmcluster { StorageReplyErrorChecker::StorageReplyErrorChecker() : _errors(0u) 
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.h b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.h index 4743367b426..2fcb6aad14a 100644 --- a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.h +++ b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.h @@ -6,7 +6,7 @@ namespace storage::api { class StorageMessage; } -namespace feedbm { +namespace search::bmcluster { class StorageReplyErrorChecker { protected: diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp index c17bdf06854..397347b7651 100644 --- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp +++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp @@ -17,6 +17,7 @@ #include <vespa/searchlib/tensor/hnsw_index.h> #include <vespa/searchlib/tensor/nearest_neighbor_index.h> #include <vespa/searchlib/tensor/nearest_neighbor_index_factory.h> +#include <vespa/searchlib/tensor/nearest_neighbor_index_loader.h> #include <vespa/searchlib/tensor/nearest_neighbor_index_saver.h> #include <vespa/searchlib/tensor/serialized_fast_value_attribute.h> #include <vespa/searchlib/tensor/tensor_attribute.h> @@ -51,6 +52,7 @@ using search::tensor::HnswIndex; using search::tensor::HnswNode; using search::tensor::NearestNeighborIndex; using search::tensor::NearestNeighborIndexFactory; +using search::tensor::NearestNeighborIndexLoader; using search::tensor::NearestNeighborIndexSaver; using search::tensor::PrepareResult; using search::tensor::TensorAttribute; @@ -89,6 +91,24 @@ public: } }; +class MockIndexLoader : public NearestNeighborIndexLoader { +private: + int& _index_value; + std::unique_ptr<search::fileutil::LoadedBuffer> _buf; + +public: + MockIndexLoader(int& index_value, + std::unique_ptr<search::fileutil::LoadedBuffer> buf) + : _index_value(index_value), + _buf(std::move(buf)) + {} + bool 
load_next() override { + ASSERT_EQUAL(sizeof(int), _buf->size()); + _index_value = (reinterpret_cast<const int*>(_buf->buffer()))[0]; + return false; + } +}; + class MockPrepareResult : public PrepareResult { public: uint32_t docid; @@ -220,10 +240,8 @@ public: } return std::unique_ptr<NearestNeighborIndexSaver>(); } - bool load(const search::fileutil::LoadedBuffer& buf) override { - ASSERT_EQUAL(sizeof(int), buf.size()); - _index_value = (reinterpret_cast<const int*>(buf.buffer()))[0]; - return true; + std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<search::fileutil::LoadedBuffer> buf) override { + return std::make_unique<MockIndexLoader>(_index_value, std::move(buf)); } std::vector<Neighbor> find_top_k(uint32_t k, vespalib::eval::TypedCells vector, uint32_t explore_k, double distance_threshold) const override diff --git a/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp b/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp index 2db6437664e..74b82649c98 100644 --- a/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp +++ b/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp @@ -103,9 +103,8 @@ public: return vector_writer.output; } void load_copy(std::vector<char> data) { - HnswIndexLoader loader(copy); - LoadedBuffer buffer(&data[0], data.size()); - loader.load(buffer); + HnswIndexLoader loader(copy, std::make_unique<LoadedBuffer>(&data[0], data.size())); + while (loader.load_next()) {} } void expect_copy_as_populated() const { diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp index 7c05699b8e1..fd86fbf1c73 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp @@ -3,6 +3,7 @@ #include "dense_tensor_attribute.h" #include "dense_tensor_attribute_saver.h" #include "nearest_neighbor_index.h" +#include 
"nearest_neighbor_index_loader.h" #include "nearest_neighbor_index_saver.h" #include "tensor_attribute.hpp" #include <vespa/eval/eval/value.h> @@ -10,10 +11,11 @@ #include <vespa/searchlib/attribute/load_utils.h> #include <vespa/searchlib/attribute/readerbase.h> #include <vespa/vespalib/data/slime/inserter.h> +#include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/lambdatask.h> #include <vespa/vespalib/util/memory_allocator.h> #include <vespa/vespalib/util/mmap_file_allocator_factory.h> #include <vespa/vespalib/util/threadstackexecutor.h> -#include <vespa/vespalib/util/lambdatask.h> #include <thread> #include <vespa/log/log.h> @@ -29,6 +31,7 @@ namespace search::tensor { namespace { constexpr uint32_t DENSE_TENSOR_ATTRIBUTE_VERSION = 1; +constexpr uint32_t LOAD_COMMIT_INTERVAL = 256; const vespalib::string tensorTypeTag("tensortype"); class BlobSequenceReader : public ReaderBase @@ -266,7 +269,7 @@ private: _attr.setCommittedDocIdLimit(std::max(_attr.getCommittedDocIdLimit(), lid + 1)); _attr._index->complete_add_document(lid, std::move(prepared)); --_pending; - if ((lid % 256) == 0) { + if ((lid % LOAD_COMMIT_INTERVAL) == 0) { _attr.commit(); }; } @@ -319,7 +322,7 @@ public: // This ensures that get_vector() (via getTensor()) is able to find the newly added tensor. 
_attr.setCommittedDocIdLimit(lid + 1); _attr._index->add_document(lid); - if ((lid % 256) == 0) { + if ((lid % LOAD_COMMIT_INTERVAL) == 0) { _attr.commit(); } } @@ -375,7 +378,17 @@ DenseTensorAttribute::onLoad(vespalib::Executor *executor) setCommittedDocIdLimit(numDocs); if (_index && use_index_file) { auto buffer = LoadUtils::loadFile(*this, DenseTensorAttributeSaver::index_file_suffix()); - if (!_index->load(*buffer)) { + try { + auto index_loader = _index->make_loader(std::move(buffer)); + size_t cnt = 0; + while (index_loader->load_next()) { + if ((++cnt % LOAD_COMMIT_INTERVAL) == 0) { + commit(); + } + } + } catch (const vespalib::IoException& ex) { + LOG(error, "IoException while loading nearest neighbor index for tensor attribute '%s': %s", + getName().c_str(), ex.what()); return false; } } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp index 49aa64212ae..8da8c4ba01f 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp @@ -1,16 +1,17 @@ // Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#include "bitvector_visited_tracker.h" #include "distance_function.h" +#include "hash_set_visited_tracker.h" #include "hnsw_index.h" #include "hnsw_index_loader.h" #include "hnsw_index_saver.h" #include "random_level_generator.h" -#include "bitvector_visited_tracker.h" -#include "hash_set_visited_tracker.h" #include "reusable_set_visited_tracker.h" #include <vespa/searchcommon/common/compaction_strategy.h> #include <vespa/searchlib/attribute/address_space_components.h> #include <vespa/searchlib/attribute/address_space_usage.h> +#include <vespa/searchlib/util/fileutil.h> #include <vespa/searchlib/util/state_explorer_utils.h> #include <vespa/vespalib/data/slime/cursor.h> #include <vespa/vespalib/data/slime/inserter.h> @@ -694,12 +695,11 @@ HnswIndex::make_saver() const return std::make_unique<HnswIndexSaver>(_graph); } -bool -HnswIndex::load(const fileutil::LoadedBuffer& buf) +std::unique_ptr<NearestNeighborIndexLoader> +HnswIndex::make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf) { assert(get_entry_docid() == 0); // cannot load after index has data - HnswIndexLoader loader(_graph); - return loader.load(buf); + return std::make_unique<HnswIndexLoader>(_graph, std::move(buf)); } struct NeighborsByDocId { diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h index 4503459a88a..4cb7afd1a24 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h @@ -183,7 +183,7 @@ public: void shrink_lid_space(uint32_t doc_id_limit) override; std::unique_ptr<NearestNeighborIndexSaver> make_saver() const override; - bool load(const fileutil::LoadedBuffer& buf) override; + std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf) override; std::vector<Neighbor> find_top_k(uint32_t k, TypedCells vector, uint32_t explore_k, double distance_threshold) const override; diff --git 
a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp index c0aec9ff91a..53b702a4d79 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp @@ -6,45 +6,64 @@ namespace search::tensor { +void +HnswIndexLoader::init() +{ + size_t num_readable = _buf->size(sizeof(uint32_t)); + _ptr = static_cast<const uint32_t *>(_buf->buffer()); + _end = _ptr + num_readable; + _entry_docid = next_int(); + _entry_level = next_int(); + _num_nodes = next_int(); +} + HnswIndexLoader::~HnswIndexLoader() {} -HnswIndexLoader::HnswIndexLoader(HnswGraph &graph) - : _graph(graph), _ptr(nullptr), _end(nullptr), _failed(false) + +HnswIndexLoader::HnswIndexLoader(HnswGraph& graph, std::unique_ptr<fileutil::LoadedBuffer> buf) + : _graph(graph), + _buf(std::move(buf)), + _ptr(nullptr), + _end(nullptr), + _entry_docid(0), + _entry_level(0), + _num_nodes(0), + _docid(0), + _link_array(), + _complete(false) { + init(); } bool -HnswIndexLoader::load(const fileutil::LoadedBuffer& buf) +HnswIndexLoader::load_next() { - size_t num_readable = buf.size(sizeof(uint32_t)); - _ptr = static_cast<const uint32_t *>(buf.buffer()); - _end = _ptr + num_readable; - uint32_t entry_docid = next_int(); - int32_t entry_level = next_int(); - uint32_t num_nodes = next_int(); - std::vector<uint32_t> link_array; - for (uint32_t docid = 0; docid < num_nodes; ++docid) { + assert(!_complete); + if (_docid < _num_nodes) { uint32_t num_levels = next_int(); if (num_levels > 0) { - _graph.make_node_for_document(docid, num_levels); + _graph.make_node_for_document(_docid, num_levels); for (uint32_t level = 0; level < num_levels; ++level) { uint32_t num_links = next_int(); - link_array.clear(); + _link_array.clear(); while (num_links-- > 0) { - link_array.push_back(next_int()); + _link_array.push_back(next_int()); } - _graph.set_link_array(docid, level, link_array); + 
_graph.set_link_array(_docid, level, _link_array); } } } - if (_failed) return false; - _graph.node_refs.ensure_size(std::max(num_nodes, 1u)); - _graph.node_refs_size.store(std::max(num_nodes, 1u), std::memory_order_release); - _graph.trim_node_refs_size(); - auto entry_node_ref = _graph.get_node_ref(entry_docid); - _graph.set_entry_node({entry_docid, entry_node_ref, entry_level}); - return true; + if (++_docid < _num_nodes) { + return true; + } else { + _graph.node_refs.ensure_size(std::max(_num_nodes, 1u)); + _graph.node_refs_size.store(std::max(_num_nodes, 1u), std::memory_order_release); + _graph.trim_node_refs_size(); + auto entry_node_ref = _graph.get_node_ref(_entry_docid); + _graph.set_entry_node({_entry_docid, entry_node_ref, _entry_level}); + _complete = true; + return false; + } } - } diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h index 9f5ae66011f..0b6658e42ec 100644 --- a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h +++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h @@ -2,7 +2,11 @@ #pragma once +#include "nearest_neighbor_index_loader.h" +#include <vespa/vespalib/util/exceptions.h> #include <cstdint> +#include <memory> +#include <vector> namespace search::fileutil { class LoadedBuffer; } @@ -13,23 +17,34 @@ struct HnswGraph; /** * Implements loading of HNSW graph structure from binary format. 
**/ -class HnswIndexLoader { -public: - HnswIndexLoader(HnswGraph &graph); - ~HnswIndexLoader(); - bool load(const fileutil::LoadedBuffer& buf); +class HnswIndexLoader : public NearestNeighborIndexLoader { private: - HnswGraph &_graph; - const uint32_t *_ptr; - const uint32_t *_end; - bool _failed; + HnswGraph& _graph; + std::unique_ptr<fileutil::LoadedBuffer> _buf; + const uint32_t* _ptr; + const uint32_t* _end; + uint32_t _entry_docid; + int32_t _entry_level; + uint32_t _num_nodes; + uint32_t _docid; + std::vector<uint32_t> _link_array; + bool _complete; + + void init(); uint32_t next_int() { if (__builtin_expect((_ptr == _end), false)) { - _failed = true; - return 0; + throw vespalib::IoException + (vespalib::IoException::createMessage("Already at the end of buffer when trying to get next int", + vespalib::IoException::CORRUPT_DATA), + vespalib::IoException::CORRUPT_DATA, ""); } return *_ptr++; } + +public: + HnswIndexLoader(HnswGraph& graph, std::unique_ptr<fileutil::LoadedBuffer> buf); + virtual ~HnswIndexLoader(); + bool load_next() override; }; } diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h index b8f30a53ddf..f75cdae8a92 100644 --- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h +++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h @@ -22,6 +22,7 @@ class CompactionStrategy; namespace search::tensor { +class NearestNeighborIndexLoader; class NearestNeighborIndexSaver; /** @@ -77,7 +78,13 @@ public: * and the caller ensures that an attribute read guard is held during the lifetime of the saver. */ virtual std::unique_ptr<NearestNeighborIndexSaver> make_saver() const = 0; - virtual bool load(const fileutil::LoadedBuffer& buf) = 0; + + /** + * Creates a loader that is used to load the index from the given buffer. + * + * This might throw vespalib::IoException. 
+ */ + virtual std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf) = 0; virtual std::vector<Neighbor> find_top_k(uint32_t k, vespalib::eval::TypedCells vector, diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h new file mode 100644 index 00000000000..703f8f863d1 --- /dev/null +++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h @@ -0,0 +1,23 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace search::tensor { + +/** + * Interface that is used to load a nearest neighbor index from binary form. + */ +class NearestNeighborIndexLoader { +public: + virtual ~NearestNeighborIndexLoader() {} + + /** + * Loads the next part of the index (e.g. the node corresponding to a given document) + * and returns whether there is more data to load. + * + * This might throw vespalib::IoException. 
+ */ + virtual bool load_next() = 0; +}; + +} diff --git a/slobrok/CMakeLists.txt b/slobrok/CMakeLists.txt index 6acbf5d5134..c6c6313cf68 100644 --- a/slobrok/CMakeLists.txt +++ b/slobrok/CMakeLists.txt @@ -19,6 +19,7 @@ vespa_define_module( TESTS src/tests/backoff src/tests/configure + src/tests/local_rpc_monitor_map src/tests/mirrorapi src/tests/registerapi src/tests/service_map_history diff --git a/slobrok/src/apps/slobrok/slobrok.cpp b/slobrok/src/apps/slobrok/slobrok.cpp index 63212745644..b2748762a12 100644 --- a/slobrok/src/apps/slobrok/slobrok.cpp +++ b/slobrok/src/apps/slobrok/slobrok.cpp @@ -50,6 +50,7 @@ App::Main() { uint32_t portnum = 2773; vespalib::string cfgId; + bool useNewLogic = false; int argi = 1; const char* optArg; @@ -63,7 +64,7 @@ App::Main() portnum = atoi(optArg); break; case 'N': - // ignore flag for now + useNewLogic = true; break; default: LOG(error, "unknown option letter '%c'", c); @@ -75,11 +76,11 @@ App::Main() if (cfgId.empty()) { LOG(debug, "no config id specified"); ConfigShim shim(portnum); - mainobj = std::make_unique<SBEnv>(shim); + mainobj = std::make_unique<SBEnv>(shim, useNewLogic); } else { ConfigShim shim(portnum, cfgId); shim.enableStateServer(true); - mainobj = std::make_unique<SBEnv>(shim); + mainobj = std::make_unique<SBEnv>(shim, useNewLogic); } hook_sigterm(); res = mainobj->MainLoop(); diff --git a/slobrok/src/tests/local_rpc_monitor_map/CMakeLists.txt b/slobrok/src/tests/local_rpc_monitor_map/CMakeLists.txt new file mode 100644 index 00000000000..aa30021939c --- /dev/null +++ b/slobrok/src/tests/local_rpc_monitor_map/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_executable(slobrok_local_rpc_monitor_map_test_app TEST + SOURCES + local_rpc_monitor_map_test.cpp + DEPENDS + slobrok_slobrokserver + GTest::GTest +) +vespa_add_test(NAME slobrok_local_rpc_monitor_map_test_app COMMAND slobrok_local_rpc_monitor_map_test_app) diff --git a/slobrok/src/tests/local_rpc_monitor_map/local_rpc_monitor_map_test.cpp b/slobrok/src/tests/local_rpc_monitor_map/local_rpc_monitor_map_test.cpp new file mode 100644 index 00000000000..9782f6ccbdc --- /dev/null +++ b/slobrok/src/tests/local_rpc_monitor_map/local_rpc_monitor_map_test.cpp @@ -0,0 +1,331 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/gtest/gtest.h> +#include <vespa/slobrok/server/local_rpc_monitor_map.h> +#include <vespa/vespalib/util/stringfmt.h> +#include <vespa/vespalib/util/time.h> +#include <vespa/fnet/scheduler.h> +#include <map> + +using namespace vespalib; +using namespace slobrok; +using vespalib::make_string_short::fmt; + +struct MapCall { + vespalib::string name; + ServiceMapping mapping; + ServiceMapping old; + static MapCall add(const ServiceMapping &m) { return {"add", m, {"",""}}; } + static MapCall remove(const ServiceMapping &m) { return {"remove", m, {"",""}}; } + static MapCall update(const ServiceMapping &o, const ServiceMapping &m) { return {"update", m, o}; } + void check(const MapCall &rhs) const { + EXPECT_EQ(name, rhs.name); + EXPECT_EQ(mapping, rhs.mapping); + EXPECT_EQ(old, rhs.old); + } + ~MapCall(); +}; +MapCall::~MapCall() = default; + +struct MonitorCall { + vespalib::string name; + ServiceMapping mapping; + bool hurry; + static MonitorCall start(const ServiceMapping &m, bool h) { return {"start", m, h}; } + static MonitorCall stop(const ServiceMapping &m) { return {"stop", m, false}; } + void check(const MonitorCall &rhs) const { + EXPECT_EQ(name, rhs.name); + EXPECT_EQ(mapping, rhs.mapping); + EXPECT_EQ(hurry, rhs.hurry); + } + ~MonitorCall(); +}; 
+MonitorCall::~MonitorCall() = default; + +template <typename Call> +class CallLog { +private: + std::vector<Call> _calls; + size_t _checked; +public: + CallLog() noexcept : _calls(), _checked(0) {} + ~CallLog() { EXPECT_EQ(_calls.size(), _checked); } + void log(Call call) { _calls.push_back(call); } + void expect(std::initializer_list<Call> list) { + ASSERT_EQ(list.size(), (_calls.size() - _checked)); + for (const auto &call: list) { + call.check(_calls[_checked++]); + } + } +}; + +struct MapLog : CallLog<MapCall>, MapListener { + void add(const ServiceMapping &mapping) override { + log(MapCall::add(mapping)); + } + void remove(const ServiceMapping &mapping) override { + log(MapCall::remove(mapping)); + } + void update(const ServiceMapping &old_mapping, + const ServiceMapping &new_mapping) override + { + log(MapCall::update(old_mapping, new_mapping)); + } +}; + +struct MonitorLog : CallLog<MonitorCall>, MappingMonitor { + void start(const ServiceMapping& mapping, bool hurry) override { + log(MonitorCall::start(mapping, hurry)); + } + void stop(const ServiceMapping& mapping) override { + log(MonitorCall::stop(mapping)); + } +}; + +struct MyMappingMonitor : MappingMonitor { + MonitorLog &monitor; + MyMappingMonitor(MonitorLog &m) : monitor(m) {} + void start(const ServiceMapping& mapping, bool hurry) override { + monitor.start(mapping, hurry); + } + void stop(const ServiceMapping& mapping) override { + monitor.stop(mapping); + } +}; + +struct LocalRpcMonitorMapTest : public ::testing::Test { + steady_time time; + FNET_Scheduler scheduler; + MonitorLog monitor_log; + MapLog map_log; + LocalRpcMonitorMap map; + std::unique_ptr<MapSubscription> subscription; + ServiceMapping mapping; + ServiceMapping mapping_conflict; + LocalRpcMonitorMapTest() + : time(duration::zero()), + scheduler(&time, &time), monitor_log(), map_log(), + map(&scheduler, [this](auto &owner) + { + EXPECT_EQ(&owner, &map); + return std::make_unique<MyMappingMonitor>(monitor_log); + }), + 
subscription(MapSubscription::subscribe(map.dispatcher(), map_log)), + mapping("dummy_service", "dummy_spec"), + mapping_conflict("dummy_service", "conflicting_dummy_spec") + {} + void tick(duration elapsed = FNET_Scheduler::tick_ms) { + time += elapsed; + scheduler.CheckTasks(); + } + void add_mapping(const ServiceMapping &m, bool is_up) { + map.add(m); // <- add from consensus map + monitor_log.expect({}); + tick(0ms); // <- process delayed add event + monitor_log.expect({MonitorCall::start(m, false)}); + map_log.expect({}); + if (is_up) { + map.up(m); // <- up from monitor + map_log.expect({MapCall::add(m)}); + } else { + map.down(m); // <- down from monitor + map_log.expect({}); + } + } + void flip_up_state(const ServiceMapping &m, bool was_up, size_t cnt) { + for (size_t i = 0; i < cnt; ++i) { + if (was_up) { + map.up(m); + map_log.expect({}); + map.down(m); + map_log.expect({MapCall::remove(m)}); + } else { + map.down(m); + map_log.expect({}); + map.up(m); + map_log.expect({MapCall::add(m)}); + } + was_up = !was_up; + } + monitor_log.expect({}); + } + void remove_mapping(const ServiceMapping &m, bool was_up) { + map.remove(m); // <- remove from consensus map + monitor_log.expect({}); + tick(0ms); // <- process delayed remove event + monitor_log.expect({MonitorCall::stop(m)}); + if (was_up) { + map_log.expect({MapCall::remove(m)}); + } else { + map_log.expect({}); + } + } + ~LocalRpcMonitorMapTest(); +}; +LocalRpcMonitorMapTest::~LocalRpcMonitorMapTest() = default; + +struct MyAddLocalHandler : LocalRpcMonitorMap::AddLocalCompletionHandler { + std::unique_ptr<OkState> &state; + bool &handler_deleted; + MyAddLocalHandler(std::unique_ptr<OkState> &s, bool &hd) + : state(s), handler_deleted(hd) {} + void doneHandler(OkState result) override { + state = std::make_unique<OkState>(result); + } + ~MyAddLocalHandler() override { + handler_deleted = true; + } +}; + +TEST_F(LocalRpcMonitorMapTest, external_add_remove_while_up) { + add_mapping(mapping, true); + 
remove_mapping(mapping, true); +} + +TEST_F(LocalRpcMonitorMapTest, external_add_remove_while_down) { + add_mapping(mapping, false); + remove_mapping(mapping, false); +} + +TEST_F(LocalRpcMonitorMapTest, server_up_down_up_down) { + add_mapping(mapping, true); + flip_up_state(mapping, true, 3); + remove_mapping(mapping, false); +} + +TEST_F(LocalRpcMonitorMapTest, server_down_up_down_up) { + add_mapping(mapping, false); + flip_up_state(mapping, false, 3); + remove_mapping(mapping, true); +} + +TEST_F(LocalRpcMonitorMapTest, multi_mapping) { + ServiceMapping m1("dummy_service1", "dummy_spec1"); + ServiceMapping m2("dummy_service2", "dummy_spec2"); + ServiceMapping m3("dummy_service3", "dummy_spec3"); + add_mapping(m1, true); + add_mapping(m2, false); + add_mapping(m3, true); + flip_up_state(m1, true, 3); + flip_up_state(m2, false, 3); + flip_up_state(m3, true, 3); + remove_mapping(m1, false); + remove_mapping(m2, true); + remove_mapping(m3, false); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_ok) { + std::unique_ptr<OkState> state; + bool handler_deleted; + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state, handler_deleted)); + monitor_log.expect({MonitorCall::start(mapping, true)}); + map_log.expect({}); + map.up(mapping); + monitor_log.expect({}); + map_log.expect({MapCall::add(mapping)}); + ASSERT_TRUE(state); + EXPECT_TRUE(state->ok()); + ASSERT_TRUE(handler_deleted); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_already_up) { + std::unique_ptr<OkState> state; + bool handler_deleted; + add_mapping(mapping, true); + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state, handler_deleted)); + monitor_log.expect({}); + map_log.expect({}); + ASSERT_TRUE(state); + EXPECT_TRUE(state->ok()); + ASSERT_TRUE(handler_deleted); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_unknown_comes_up) { + std::unique_ptr<OkState> state; + bool handler_deleted; + add_mapping(mapping, false); + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state, 
handler_deleted)); + monitor_log.expect({MonitorCall::stop(mapping), MonitorCall::start(mapping, true)}); + map_log.expect({}); + EXPECT_FALSE(state); + map.up(mapping); + map_log.expect({MapCall::add(mapping)}); + ASSERT_TRUE(state); + EXPECT_TRUE(state->ok()); + ASSERT_TRUE(handler_deleted); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_unknown_goes_down) { + std::unique_ptr<OkState> state; + bool handler_deleted; + add_mapping(mapping, false); + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state, handler_deleted)); + monitor_log.expect({MonitorCall::stop(mapping), MonitorCall::start(mapping, true)}); + map_log.expect({}); + EXPECT_FALSE(state); + map.down(mapping); + map_log.expect({}); + ASSERT_TRUE(state); + EXPECT_FALSE(state->ok()); + ASSERT_TRUE(handler_deleted); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_conflict) { + std::unique_ptr<OkState> state; + bool handler_deleted; + add_mapping(mapping, true); + map.addLocal(mapping_conflict, std::make_unique<MyAddLocalHandler>(state, handler_deleted)); + monitor_log.expect({}); + map_log.expect({}); + ASSERT_TRUE(state); + EXPECT_TRUE(state->failed()); + ASSERT_TRUE(handler_deleted); +} + +TEST_F(LocalRpcMonitorMapTest, local_multi_add) { + std::unique_ptr<OkState> state1; + bool handler_deleted1; + std::unique_ptr<OkState> state2; + bool handler_deleted2; + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state1, handler_deleted1)); + monitor_log.expect({MonitorCall::start(mapping, true)}); + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state2, handler_deleted2)); + monitor_log.expect({}); + map_log.expect({}); + EXPECT_FALSE(state1); + EXPECT_FALSE(state2); + map.up(mapping); + monitor_log.expect({}); + map_log.expect({MapCall::add(mapping)}); + ASSERT_TRUE(state1); + ASSERT_TRUE(state2); + EXPECT_TRUE(state1->ok()); + EXPECT_TRUE(state2->ok()); + ASSERT_TRUE(handler_deleted1); + ASSERT_TRUE(handler_deleted2); +} + +TEST_F(LocalRpcMonitorMapTest, local_remove) { + 
add_mapping(mapping, true); + map.removeLocal(mapping); + monitor_log.expect({MonitorCall::stop(mapping), MonitorCall::start(mapping, false)}); + map_log.expect({MapCall::remove(mapping)}); + map.up(mapping); // timeout case (should normally not happen) + map_log.expect({MapCall::add(mapping)}); +} + +TEST_F(LocalRpcMonitorMapTest, local_add_local_remove) { + std::unique_ptr<OkState> state; + bool handler_deleted; + map.addLocal(mapping, std::make_unique<MyAddLocalHandler>(state, handler_deleted)); + monitor_log.expect({MonitorCall::start(mapping, true)}); + map_log.expect({}); + map.removeLocal(mapping); + monitor_log.expect({MonitorCall::stop(mapping)}); + map_log.expect({}); + ASSERT_TRUE(state); + EXPECT_TRUE(state->failed()); + ASSERT_TRUE(handler_deleted); +} + +GTEST_MAIN_RUN_ALL_TESTS() diff --git a/slobrok/src/vespa/slobrok/server/cmd.cpp b/slobrok/src/vespa/slobrok/server/cmd.cpp index b809f655a9d..df856189d89 100644 --- a/slobrok/src/vespa/slobrok/server/cmd.cpp +++ b/slobrok/src/vespa/slobrok/server/cmd.cpp @@ -62,6 +62,15 @@ ScriptCommand::makeIgnoreCmd(SBEnv &env, const std::string & name, const std::st return ScriptCommand(std::move(data)); } +ScriptCommand +ScriptCommand::makeRegCompleter(SBEnv &env, + const std::string &name, const std::string &spec, + FRT_RPCRequest *req) +{ + auto data = std::make_unique<ScriptData>(env, name, spec, req); + data->_state = ScriptData::XCH_DOADD; + return ScriptCommand(std::move(data)); +} void ScriptCommand::doRequest() @@ -124,7 +133,9 @@ ScriptCommand::doneHandler(OkState result) LOG(debug, "done doAdd(%s,%s)", name_p, spec_p); data._state = ScriptData::RDC_INVAL; // all OK - data.registerRequest->Return(); + if (data.registerRequest != nullptr) { + data.registerRequest->Return(); + } cleanupReservation(data); return; } else if (data._state == ScriptData::XCH_IGNORE) { diff --git a/slobrok/src/vespa/slobrok/server/cmd.h b/slobrok/src/vespa/slobrok/server/cmd.h index d790ae93f5c..e7f42f75e42 100644 --- 
a/slobrok/src/vespa/slobrok/server/cmd.h +++ b/slobrok/src/vespa/slobrok/server/cmd.h @@ -26,6 +26,7 @@ public: static ScriptCommand makeRegRpcSrvCmd(SBEnv &env, const std::string &name, const std::string &spec, FRT_RPCRequest *req); static ScriptCommand makeIgnoreCmd(SBEnv &env, const std::string &name, const std::string &spec); + static ScriptCommand makeRegCompleter(SBEnv &env, const std::string &name, const std::string &spec, FRT_RPCRequest *req); void doneHandler(OkState result); void doRequest(); diff --git a/slobrok/src/vespa/slobrok/server/exchange_manager.cpp b/slobrok/src/vespa/slobrok/server/exchange_manager.cpp index 18da01ee526..ccfb8d3bd63 100644 --- a/slobrok/src/vespa/slobrok/server/exchange_manager.cpp +++ b/slobrok/src/vespa/slobrok/server/exchange_manager.cpp @@ -127,17 +127,19 @@ ExchangeManager::diffLists(const ServiceMappingList &lhs, const ServiceMappingLi void ExchangeManager::healthCheck() { - auto oldWorldServices = env().rpcServerMap().allManaged(); - ServiceMappingList oldWorldList; - for (const auto *nsp : oldWorldServices) { - oldWorldList.emplace_back(nsp->getName(), nsp->getSpec()); - } - std::sort(oldWorldList.begin(), oldWorldList.end()); auto newWorldList = env().consensusMap().currentConsensus(); - vespalib::string diff = diffLists(oldWorldList, newWorldList); - if (! diff.empty()) { - LOG(warning, "Diff from old world rpcServerMap to new world consensus map: %s", - diff.c_str()); + if (! _env.useNewLogic()) { + auto oldWorldServices = env().rpcServerMap().allManaged(); + ServiceMappingList oldWorldList; + for (const auto *nsp : oldWorldServices) { + oldWorldList.emplace_back(nsp->getName(), nsp->getSpec()); + } + std::sort(oldWorldList.begin(), oldWorldList.end()); + vespalib::string diff = diffLists(oldWorldList, newWorldList); + if (! 
diff.empty()) { + LOG(warning, "Diff from old world rpcServerMap to new world consensus map: %s", + diff.c_str()); + } } for (const auto & [ name, partner ] : _partners) { partner->maybeStartFetch(); @@ -145,7 +147,7 @@ ExchangeManager::healthCheck() auto remoteList = partner->remoteMap().allMappings(); // 0 is expected (when remote is down) if (remoteList.size() != 0) { - diff = diffLists(newWorldList, remoteList); + vespalib::string diff = diffLists(newWorldList, remoteList); if (! diff.empty()) { LOG(warning, "Diff from consensus map to peer slobrok mirror: %s", diff.c_str()); diff --git a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp index 454d123eead..16e47371cbb 100644 --- a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp +++ b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp @@ -10,6 +10,26 @@ namespace slobrok { #pragma GCC diagnostic ignored "-Winline" +namespace { + +struct ChainedAddLocalCompletionHandler : LocalRpcMonitorMap::AddLocalCompletionHandler { + std::unique_ptr<AddLocalCompletionHandler> first; + std::unique_ptr<AddLocalCompletionHandler> second; + + ChainedAddLocalCompletionHandler(std::unique_ptr<AddLocalCompletionHandler> f, + std::unique_ptr<AddLocalCompletionHandler> s) + : first(std::move(f)), second(std::move(s)) + {} + + void doneHandler(OkState result) override { + first->doneHandler(result); + second->doneHandler(result); + } + ~ChainedAddLocalCompletionHandler() override {} +}; + +} + void LocalRpcMonitorMap::DelayedTasks::PerformTask() { std::vector<Event> todo; std::swap(todo, _queue); @@ -25,9 +45,9 @@ void LocalRpcMonitorMap::DelayedTasks::PerformTask() { } } -LocalRpcMonitorMap::LocalRpcMonitorMap(FRT_Supervisor &supervisor, +LocalRpcMonitorMap::LocalRpcMonitorMap(FNET_Scheduler *scheduler, MappingMonitorFactory mappingMonitorFactory) - : _delayedTasks(supervisor.GetScheduler(), *this), + : _delayedTasks(scheduler, *this), _map(), 
_dispatcher(), _history(), @@ -82,18 +102,37 @@ ServiceMapHistory & LocalRpcMonitorMap::history() { return _history; } +bool LocalRpcMonitorMap::wouldConflict(const ServiceMapping &mapping) const { + auto iter = _map.find(mapping.name); + if (iter == _map.end()) { + return false; // no mapping, no conflict + } + return (iter->second.spec != mapping.spec); +} + void LocalRpcMonitorMap::addLocal(const ServiceMapping &mapping, - std::unique_ptr<ScriptCommand> inflight) + std::unique_ptr<AddLocalCompletionHandler> inflight) { LOG(debug, "try local add: mapping %s->%s", mapping.name.c_str(), mapping.spec.c_str()); auto old = _map.find(mapping.name); if (old != _map.end()) { - const PerService & exists = old->second; + PerService & exists = old->second; if (exists.spec == mapping.spec) { LOG(debug, "added mapping %s->%s was already present", mapping.name.c_str(), mapping.spec.c_str()); - inflight->doneHandler(OkState(0, "already registered")); + if (exists.up) { + inflight->doneHandler(OkState(0, "already registered")); + } else if (exists.inflight) { + auto newInflight = std::make_unique<ChainedAddLocalCompletionHandler>( + std::move(exists.inflight), + std::move(inflight)); + exists.inflight = std::move(newInflight); + } else { + _mappingMonitor->stop(mapping); + exists.inflight = std::move(inflight); + _mappingMonitor->start(mapping, true); + } return; } LOG(warning, "tried addLocal for mapping %s->%s, but already had conflicting mapping %s->%s", @@ -105,6 +144,43 @@ void LocalRpcMonitorMap::addLocal(const ServiceMapping &mapping, addToMap(mapping, localService(mapping, std::move(inflight)), true); } +void LocalRpcMonitorMap::removeLocal(const ServiceMapping &mapping) { + LOG(debug, "try local remove: mapping %s->%s", + mapping.name.c_str(), mapping.spec.c_str()); + auto old = _map.find(mapping.name); + if (old == _map.end()) { + return; // already removed, OK + } + PerService & exists = old->second; + if (exists.spec != mapping.spec) { + LOG(warning, "tried 
removeLocal for mapping %s->%s, but already had conflicting mapping %s->%s", + mapping.name.c_str(), mapping.spec.c_str(), + mapping.name.c_str(), exists.spec.c_str()); + return; // unregister for old, conflicting mapping + } + if (exists.localOnly) { + // we can just remove it + auto removed = removeFromMap(old); + if (removed.inflight) { + auto target = std::move(removed.inflight); + target->doneHandler(OkState(13, "removed during initialization")); + } + if (removed.up) { + _dispatcher.remove(removed.mapping); + } + return; + } + // also exists in consensus map, so we can't just remove it + // instead, pretend it's down and delay next ping + _mappingMonitor->stop(mapping); + if (exists.up) { + exists.up = false; + _dispatcher.remove(mapping); + } + _mappingMonitor->start(mapping, false); + return; +} + void LocalRpcMonitorMap::add(const ServiceMapping &mapping) { _delayedTasks.handleLater(Event::add(mapping)); } diff --git a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h index 3b2c74648d2..e3d081eacc9 100644 --- a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h +++ b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h @@ -27,6 +27,13 @@ namespace slobrok { class LocalRpcMonitorMap : public MapListener, public MappingMonitorOwner { +public: + // Interface used to signal the result of addLocal + struct AddLocalCompletionHandler { + virtual void doneHandler(OkState result) = 0; + virtual ~AddLocalCompletionHandler() {} + }; + private: enum class EventType { ADD, REMOVE }; @@ -66,12 +73,12 @@ private: struct PerService { bool up; bool localOnly; - std::unique_ptr<ScriptCommand> inflight; + std::unique_ptr<AddLocalCompletionHandler> inflight; vespalib::string spec; }; PerService localService(const ServiceMapping &mapping, - std::unique_ptr<ScriptCommand> inflight) + std::unique_ptr<AddLocalCompletionHandler> inflight) { return PerService{ .up = false, @@ -109,22 +116,27 @@ private: 
ServiceMapping mapping; bool up; bool localOnly; - std::unique_ptr<ScriptCommand> inflight; + std::unique_ptr<AddLocalCompletionHandler> inflight; }; RemovedData removeFromMap(Map::iterator iter); public: - LocalRpcMonitorMap(FRT_Supervisor &supervisor, + LocalRpcMonitorMap(FNET_Scheduler *scheduler, MappingMonitorFactory mappingMonitorFactory); ~LocalRpcMonitorMap(); MapSource &dispatcher() { return _dispatcher; } ServiceMapHistory & history(); + bool wouldConflict(const ServiceMapping &mapping) const; + /** for use by register API, will call doneHandler() on inflight script */ void addLocal(const ServiceMapping &mapping, - std::unique_ptr<ScriptCommand> inflight); + std::unique_ptr<AddLocalCompletionHandler> inflight); + + /** for use by unregister API */ + void removeLocal(const ServiceMapping &mapping); void add(const ServiceMapping &mapping) override; void remove(const ServiceMapping &mapping) override; diff --git a/slobrok/src/vespa/slobrok/server/rpchooks.cpp b/slobrok/src/vespa/slobrok/server/rpchooks.cpp index 4ed173deaa1..ab1d5246ddc 100644 --- a/slobrok/src/vespa/slobrok/server/rpchooks.cpp +++ b/slobrok/src/vespa/slobrok/server/rpchooks.cpp @@ -37,6 +37,12 @@ public: ~MetricsReport() override { Kill(); } }; +struct ScriptCommandWrapper : LocalRpcMonitorMap::AddLocalCompletionHandler { + ScriptCommand script; + ScriptCommandWrapper(ScriptCommand &&script_in) : script(std::move(script_in)) {} + void doneHandler(OkState result) override { script.doneHandler(result); } +}; + } // namespace <unnamed> //----------------------------------------------------------------------------- @@ -215,6 +221,10 @@ RPCHooks::initRPC(FRT_Supervisor *supervisor) } +bool RPCHooks::useNewLogic() const { + return _env.useNewLogic(); +} + void RPCHooks::rpc_listNamesServed(FRT_RPCRequest *req) { @@ -228,6 +238,9 @@ RPCHooks::rpc_listNamesServed(FRT_RPCRequest *req) void RPCHooks::rpc_registerRpcServer(FRT_RPCRequest *req) { + if (useNewLogic()) { + return 
new_registerRpcServer(req); + } FRT_Values &args = *req->GetParams(); const char *dName = args[0]._string._str; const char *dSpec = args[1]._string._str; @@ -238,7 +251,7 @@ RPCHooks::rpc_registerRpcServer(FRT_RPCRequest *req) // TODO: run only this path, and complete the request instead of ignoring auto script = ScriptCommand::makeIgnoreCmd(_env, dName, dSpec); ServiceMapping mapping{dName, dSpec}; - _env.localMonitorMap().addLocal(mapping, std::make_unique<ScriptCommand>(std::move(script))); + _env.localMonitorMap().addLocal(mapping, std::make_unique<ScriptCommandWrapper>(std::move(script))); } // is this already OK? if (_rpcsrvmanager.alreadyManaged(dName, dSpec)) { @@ -260,9 +273,34 @@ RPCHooks::rpc_registerRpcServer(FRT_RPCRequest *req) completer.doRequest(); } +void RPCHooks::new_registerRpcServer(FRT_RPCRequest *req) { + FRT_Values &args = *req->GetParams(); + const char *dName = args[0]._string._str; + const char *dSpec = args[1]._string._str; + LOG(debug, "RPC: invoked registerRpcServer(%s,%s)", dName, dSpec); + _cnts.registerReqs++; + ServiceMapping mapping{dName, dSpec}; + // can we say now, that this will fail? 
+ if (_env.consensusMap().wouldConflict(mapping)) { + req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected"); + LOG(info, "cannot register %s at %s: conflict", dName, dSpec); + return; + } + auto script = ScriptCommand::makeRegCompleter(_env, dName, dSpec, req); + req->Detach(); + _env.localMonitorMap().addLocal(mapping, std::make_unique<ScriptCommandWrapper>(std::move(script))); + // TODO: remove this + script = ScriptCommand::makeRegRpcSrvCmd(_env, dName, dSpec, nullptr); + script.doRequest(); + return; +} + void RPCHooks::rpc_unregisterRpcServer(FRT_RPCRequest *req) { + if (useNewLogic()) { + return new_unregisterRpcServer(req); + } FRT_Values &args = *req->GetParams(); const char *dName = args[0]._string._str; const char *dSpec = args[1]._string._str; @@ -278,6 +316,17 @@ RPCHooks::rpc_unregisterRpcServer(FRT_RPCRequest *req) return; } +void RPCHooks::new_unregisterRpcServer(FRT_RPCRequest *req) { + FRT_Values &args = *req->GetParams(); + const char *dName = args[0]._string._str; + const char *dSpec = args[1]._string._str; + ServiceMapping mapping{dName, dSpec}; + _env.localMonitorMap().removeLocal(mapping); + _env.exchangeManager().forwardRemove(dName, dSpec); + LOG(debug, "unregisterRpcServer(%s,%s)", dName, dSpec); + _cnts.otherReqs++; + return; +} void RPCHooks::rpc_addPeer(FRT_RPCRequest *req) @@ -322,6 +371,9 @@ RPCHooks::rpc_removePeer(FRT_RPCRequest *req) void RPCHooks::rpc_wantAdd(FRT_RPCRequest *req) { + if (useNewLogic()) { + return new_wantAdd(req); + } FRT_Values &args = *req->GetParams(); const char *remsb = args[0]._string._str; const char *dName = args[1]._string._str; @@ -341,10 +393,38 @@ RPCHooks::rpc_wantAdd(FRT_RPCRequest *req) return; } +void RPCHooks::new_wantAdd(FRT_RPCRequest *req) { + FRT_Values &args = *req->GetParams(); + const char *remsb = args[0]._string._str; + const char *dName = args[1]._string._str; + const char *dSpec = args[2]._string._str; + FRT_Values &retval = *req->GetReturn(); + ServiceMapping mapping{dName, dSpec}; 
+ bool conflict = ( + _env.consensusMap().wouldConflict(mapping) + || + _env.localMonitorMap().wouldConflict(mapping) + ); + if (conflict) { + retval.AddInt32(13); + retval.AddString("conflict detected"); + req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected"); + } else { + retval.AddInt32(0); + retval.AddString("ok"); + } + LOG(debug, "%s->wantAdd(%s,%s) %s", + remsb, dName, dSpec, conflict ? "conflict" : "OK"); + _cnts.wantAddReqs++; + return; +} void RPCHooks::rpc_doRemove(FRT_RPCRequest *req) { + if (useNewLogic()) { + return new_doRemove(req); + } FRT_Values &args = *req->GetParams(); const char *rname = args[0]._string._str; const char *dname = args[1]._string._str; @@ -364,9 +444,27 @@ RPCHooks::rpc_doRemove(FRT_RPCRequest *req) return; } +void RPCHooks::new_doRemove(FRT_RPCRequest *req) { + FRT_Values &args = *req->GetParams(); + const char *rname = args[0]._string._str; + const char *dname = args[1]._string._str; + const char *dspec = args[2]._string._str; + FRT_Values &retval = *req->GetReturn(); + ServiceMapping mapping{dname, dspec}; + _env.localMonitorMap().removeLocal(mapping); + retval.AddInt32(0); + retval.AddString("ok"); + LOG(debug, "%s->doRemove(%s,%s)", rname, dname, dspec); + _cnts.doRemoveReqs++; + return; +} + void RPCHooks::rpc_doAdd(FRT_RPCRequest *req) { + if (useNewLogic()) { + return new_doAdd(req); + } FRT_Values &args = *req->GetParams(); const char *rname = args[0]._string._str; const char *dname = args[1]._string._str; @@ -386,6 +484,28 @@ RPCHooks::rpc_doAdd(FRT_RPCRequest *req) return; } +void RPCHooks::new_doAdd(FRT_RPCRequest *req) { + FRT_Values &args = *req->GetParams(); + const char *remsb = args[0]._string._str; + const char *dName = args[1]._string._str; + const char *dSpec = args[2]._string._str; + FRT_Values &retval = *req->GetReturn(); + ServiceMapping mapping{dName, dSpec}; + bool ok = true; + if (_env.consensusMap().wouldConflict(mapping)) { + retval.AddInt32(13); + retval.AddString("conflict detected"); + 
req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected"); + ok = false; + } else { + retval.AddInt32(0); + retval.AddString("ok"); + } + LOG(debug, "%s->doAdd(%s,%s) %s", + remsb, dName, dSpec, ok ? "OK" : "failed"); + _cnts.doAddReqs++; + return; +} void RPCHooks::rpc_lookupRpcServer(FRT_RPCRequest *req) @@ -455,6 +575,7 @@ RPCHooks::rpc_lookupManaged(FRT_RPCRequest *req) FRT_Values &args = *req->GetParams(); const char *name = args[0]._string._str; LOG(debug, "RPC: lookupManaged(%s)", name); + // TODO: use local history here const auto & visible = _env.globalHistory(); auto diff = visible.makeDiffFrom(0); for (const auto & entry : diff.updated) { diff --git a/slobrok/src/vespa/slobrok/server/rpchooks.h b/slobrok/src/vespa/slobrok/server/rpchooks.h index a41e473b183..e8f6c65ea47 100644 --- a/slobrok/src/vespa/slobrok/server/rpchooks.h +++ b/slobrok/src/vespa/slobrok/server/rpchooks.h @@ -58,8 +58,16 @@ public: void countFailedHeartbeat() { _cnts.heartBeatFails++; } private: + bool useNewLogic() const; + void rpc_lookupRpcServer(FRT_RPCRequest *req); + void new_registerRpcServer(FRT_RPCRequest *req); + void new_unregisterRpcServer(FRT_RPCRequest *req); + void new_wantAdd(FRT_RPCRequest *req); + void new_doRemove(FRT_RPCRequest *req); + void new_doAdd(FRT_RPCRequest *req); + void rpc_registerRpcServer(FRT_RPCRequest *req); void rpc_unregisterRpcServer(FRT_RPCRequest *req); diff --git a/slobrok/src/vespa/slobrok/server/sbenv.cpp b/slobrok/src/vespa/slobrok/server/sbenv.cpp index 1f54716c29c..ebb9935877f 100644 --- a/slobrok/src/vespa/slobrok/server/sbenv.cpp +++ b/slobrok/src/vespa/slobrok/server/sbenv.cpp @@ -97,12 +97,15 @@ ConfigTask::PerformTask() } // namespace slobrok::<unnamed> -SBEnv::SBEnv(const ConfigShim &shim) +SBEnv::SBEnv(const ConfigShim &shim) : SBEnv(shim, false) {} + +SBEnv::SBEnv(const ConfigShim &shim, bool useNewConsensusLogic) : _transport(std::make_unique<FNET_Transport>(TransportConfig().drop_empty_buffers(true))), 
_supervisor(std::make_unique<FRT_Supervisor>(_transport.get())), _configShim(shim), _configurator(shim.factory().create(*this)), _shuttingDown(false), + _useNewLogic(useNewConsensusLogic), _partnerList(), _me(createSpec(_configShim.portNumber())), _rpcHooks(*this, _rpcsrvmap, _rpcsrvmanager), @@ -110,7 +113,7 @@ SBEnv::SBEnv(const ConfigShim &shim) _health(), _metrics(_rpcHooks, *_transport), _components(), - _localRpcMonitorMap(*_supervisor, + _localRpcMonitorMap(getScheduler(), [this] (MappingMonitorOwner &owner) { return std::make_unique<RpcMappingMonitor>(*_supervisor, owner); }), @@ -118,11 +121,19 @@ SBEnv::SBEnv(const ConfigShim &shim) _exchanger(*this, _rpcsrvmap), _rpcsrvmap() { + if (useNewLogic()) { + srandom(time(nullptr) ^ getpid()); + // note: feedback loop between these two: + _localMonitorSubscription = MapSubscription::subscribe(_consensusMap, _localRpcMonitorMap); + _consensusSubscription = MapSubscription::subscribe(_localRpcMonitorMap.dispatcher(), _consensusMap); + _globalHistorySubscription = MapSubscription::subscribe(_consensusMap, _globalVisibleHistory); + _rpcHooks.initRPC(getSupervisor()); + return; + } srandom(time(nullptr) ^ getpid()); // note: feedback loop between these two: _localMonitorSubscription = MapSubscription::subscribe(_consensusMap, _localRpcMonitorMap); _consensusSubscription = MapSubscription::subscribe(_localRpcMonitorMap.dispatcher(), _consensusMap); - // TODO: use consensus as source here: _globalHistorySubscription = MapSubscription::subscribe(_rpcsrvmap.proxy(), _globalVisibleHistory); _rpcHooks.initRPC(getSupervisor()); } diff --git a/slobrok/src/vespa/slobrok/server/sbenv.h b/slobrok/src/vespa/slobrok/server/sbenv.h index 44b7305814c..c6fd8905131 100644 --- a/slobrok/src/vespa/slobrok/server/sbenv.h +++ b/slobrok/src/vespa/slobrok/server/sbenv.h @@ -44,6 +44,7 @@ private: ConfigShim _configShim; Configurator::UP _configurator; bool _shuttingDown; + const bool _useNewLogic; SBEnv(const SBEnv &); // Not used SBEnv 
&operator=(const SBEnv &); // Not used @@ -71,6 +72,7 @@ private: public: explicit SBEnv(const ConfigShim &shim); + SBEnv(const ConfigShim &shim, bool useNewConsensusLogic); ~SBEnv(); FNET_Transport *getTransport() { return _transport.get(); } @@ -105,6 +107,7 @@ public: bool isSuspended() const { return false; } bool isShuttingDown() const { return _shuttingDown; } + bool useNewLogic() const { return _useNewLogic; } int MainLoop(); diff --git a/slobrok/src/vespa/slobrok/server/union_service_map.cpp b/slobrok/src/vespa/slobrok/server/union_service_map.cpp index baf94a6fa69..9abfc237d56 100644 --- a/slobrok/src/vespa/slobrok/server/union_service_map.cpp +++ b/slobrok/src/vespa/slobrok/server/union_service_map.cpp @@ -20,6 +20,19 @@ ServiceMappingList UnionServiceMap::currentConsensus() const { return result; } +bool UnionServiceMap::wouldConflict(const ServiceMapping &mapping) const { + const vespalib::string &key = mapping.name; + auto iter = _mappings.find(key); + if (iter == _mappings.end()) { + return false; + } + const Mappings &values = iter->second; + if (values.size() != 1) { + return true; + } + return (values[0].spec != mapping.spec); +} + void UnionServiceMap::add(const ServiceMapping &mapping) { const vespalib::string &key = mapping.name; diff --git a/slobrok/src/vespa/slobrok/server/union_service_map.h b/slobrok/src/vespa/slobrok/server/union_service_map.h index d5bcbfaed94..67d3221849d 100644 --- a/slobrok/src/vespa/slobrok/server/union_service_map.h +++ b/slobrok/src/vespa/slobrok/server/union_service_map.h @@ -36,6 +36,8 @@ public: ServiceMappingList currentConsensus() const; + bool wouldConflict(const ServiceMapping &mapping) const; + void add(const ServiceMapping &mapping) override; void remove(const ServiceMapping &mapping) override; void update(const ServiceMapping &old_mapping, diff --git a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java 
b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java index df31e454cd7..ed7d30c476f 100644 --- a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java +++ b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java @@ -4,11 +4,11 @@ package com.yahoo.container.standalone; import com.yahoo.config.FileReference; import com.yahoo.config.application.api.FileRegistry; import com.yahoo.filedistribution.fileacquirer.FileAcquirer; -import com.yahoo.net.HostName; +import net.jpountz.lz4.LZ4FrameOutputStream; import java.io.File; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; +import java.io.FileOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.HashMap; @@ -24,9 +24,6 @@ import java.util.stream.Collectors; * @author ollivir */ public class LocalFileDb implements FileAcquirer, FileRegistry { - - private static final Constructor<FileReference> fileReferenceConstructor = createFileReferenceConstructor(); - private final Map<FileReference, File> fileReferenceToFile = new HashMap<>(); private final Path appPath; @@ -57,12 +54,7 @@ public class LocalFileDb implements FileAcquirer, FileRegistry { throw new RuntimeException("The file does not exist: " + file.getPath()); } - FileReference fileReference = null; - try { - fileReference = fileReferenceConstructor.newInstance("LocalFileDb:" + relativePath); - } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { - throw new RuntimeException("Unable to create new FileReference", e); - } + FileReference fileReference = new FileReference("LocalFileDb:" + relativePath); fileReferenceToFile.put(fileReference, file); return fileReference; } @@ -80,16 +72,24 @@ public class LocalFileDb implements FileAcquirer, FileRegistry { @Override public FileReference addBlob(String name, ByteBuffer blob) { - throw new 
RuntimeException("addBlob(String name, ByteBuffer blob) is not implemented here."); + writeBlob(blob, name); + File file = appPath.resolve(name).toFile(); + FileReference fileReference = new FileReference("LocalFileDb:" + name); + fileReferenceToFile.put(fileReference, file); + return fileReference; } - private static Constructor<FileReference> createFileReferenceConstructor() { - try { - Constructor<FileReference> method = FileReference.class.getDeclaredConstructor(String.class); - method.setAccessible(true); - return method; - } catch (NoSuchMethodException ex) { - throw new IllegalStateException(ex); + private void writeBlob(ByteBuffer blob, String relativePath) { + try (FileOutputStream fos = new FileOutputStream(new File(appPath.toFile(), relativePath))) { + if (relativePath.endsWith(".lz4")) { + LZ4FrameOutputStream lz4 = new LZ4FrameOutputStream(fos); + lz4.write(blob.array(), blob.arrayOffset(), blob.remaining()); + lz4.close(); + } else { + fos.write(blob.array(), blob.arrayOffset(), blob.remaining()); + } + } catch (IOException e) { + throw new IllegalArgumentException("Failed writing temp file", e); } } diff --git a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainer.java b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainer.java index a00ffd8b985..5d093aa2609 100644 --- a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainer.java +++ b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainer.java @@ -3,7 +3,6 @@ package com.yahoo.container.standalone; import com.yahoo.collections.Pair; import com.yahoo.config.model.ConfigModelRepo; -import com.yahoo.config.model.producer.AbstractConfigProducerRoot; import com.yahoo.io.IOUtils; import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.container.xml.ContainerModelBuilder.Networking; @@ -22,9 +21,6 @@ import java.util.List; * @author ollivir */ public class 
StandaloneContainer { - public static String firstContainerId(AbstractConfigProducerRoot root) { - return root.getConfigProducer("container").get().getConfigId(); - } interface ThrowingFunction<T, U> { U apply(T input) throws Exception; diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt index 678c19d4c6f..67a7fed8d0b 100644 --- a/storage/src/tests/distributor/CMakeLists.txt +++ b/storage/src/tests/distributor/CMakeLists.txt @@ -47,6 +47,7 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST statoperationtest.cpp statusreporterdelegatetest.cpp throttlingoperationstartertest.cpp + top_level_bucket_db_updater_test.cpp top_level_distributor_test.cpp top_level_distributor_test_util.cpp twophaseupdateoperationtest.cpp diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp index 861f8e72832..72cc0e1ba9f 100644 --- a/storage/src/tests/distributor/blockingoperationstartertest.cpp +++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp @@ -122,7 +122,7 @@ BlockingOperationStarterTest::SetUp() _compReg = std::make_unique<StorageComponentRegisterImpl>(); _compReg->setClock(_clock); _clock.setAbsoluteTimeInSeconds(1); - _messageTracker = std::make_unique<PendingMessageTracker>(*_compReg); + _messageTracker = std::make_unique<PendingMessageTracker>(*_compReg, 0); _fake_ctx = std::make_unique<FakeDistributorStripeOperationContext>(*_messageTracker); _operation_sequencer = std::make_unique<OperationSequencer>(); _operationStarter = std::make_unique<BlockingOperationStarter>(*_fake_ctx, *_operation_sequencer, *_starterImpl); diff --git a/storage/src/tests/distributor/distributor_stripe_test.cpp b/storage/src/tests/distributor/distributor_stripe_test.cpp index 547e7f02b02..a61ea61854e 100644 --- a/storage/src/tests/distributor/distributor_stripe_test.cpp +++ b/storage/src/tests/distributor/distributor_stripe_test.cpp @@ 
-870,7 +870,7 @@ DistributorStripeTest::set_up_and_start_get_op_with_stale_reads_enabled(bool ena _stripe->handle_or_enqueue_message(make_dummy_get_command_for_bucket_1()); } -TEST_F(DistributorStripeTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) +TEST_F(DistributorStripeTest, gets_are_started_outside_main_stripe_logic_if_stale_reads_enabled) { set_up_and_start_get_op_with_stale_reads_enabled(true); ASSERT_THAT(_sender.commands(), SizeIs(1)); @@ -883,7 +883,7 @@ TEST_F(DistributorStripeTest, gets_are_started_outside_main_distributor_logic_if EXPECT_THAT(_sender.replies(), SizeIs(1)); } -TEST_F(DistributorStripeTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) +TEST_F(DistributorStripeTest, gets_are_not_started_outside_main_stripe_logic_if_stale_reads_disabled) { set_up_and_start_get_op_with_stale_reads_enabled(false); // Get has been placed into distributor queue, so no external messages are produced. @@ -894,7 +894,7 @@ TEST_F(DistributorStripeTest, gets_are_not_started_outside_main_distributor_logi // There's no need or desire to track "lockfree" Gets in the main pending message tracker, // as we only have to track mutations to inhibit maintenance ops safely. Furthermore, // the message tracker is a multi-index and therefore has some runtime cost. 
-TEST_F(DistributorStripeTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) +TEST_F(DistributorStripeTest, gets_started_outside_stripe_thread_are_not_tracked_by_pending_message_tracker) { set_up_and_start_get_op_with_stale_reads_enabled(true); Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1)); @@ -902,7 +902,7 @@ TEST_F(DistributorStripeTest, gets_started_outside_main_thread_are_not_tracked_b 0, bucket, api::MessageType::GET_ID)); } -TEST_F(DistributorStripeTest, closing_aborts_gets_started_outside_main_distributor_thread) +TEST_F(DistributorStripeTest, closing_aborts_gets_started_outside_stripe_thread) { set_up_and_start_get_op_with_stale_reads_enabled(true); _stripe->flush_and_close(); diff --git a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp index 824fb51acb9..b871bf5841e 100644 --- a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp +++ b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp @@ -561,6 +561,7 @@ LegacyBucketDBUpdaterTest::LegacyBucketDBUpdaterTest() LegacyBucketDBUpdaterTest::~LegacyBucketDBUpdaterTest() = default; +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, normal_usage) { setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3")); @@ -592,6 +593,7 @@ TEST_F(LegacyBucketDBUpdaterTest, normal_usage) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, distributor_change) { int numBuckets = 100; @@ -622,6 +624,7 @@ TEST_F(LegacyBucketDBUpdaterTest, distributor_change) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) { std::string 
distConfig(getDistConfig6Nodes2Groups()); setDistribution(distConfig); @@ -653,6 +656,7 @@ TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) { ASSERT_EQ(messageCount(6), _sender.commands().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) { setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i")); @@ -690,6 +694,7 @@ TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) { setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1")); @@ -732,6 +737,7 @@ TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) { EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, down_while_init) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); @@ -795,6 +801,7 @@ LegacyBucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes) return res; } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, node_down) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); enableDistributorClusterState("distributor:1 storage:3"); @@ -810,6 +817,7 @@ TEST_F(LegacyBucketDBUpdaterTest, node_down) { EXPECT_FALSE(bucketExistsThatHasNode(100, 1)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); enableDistributorClusterState("distributor:1 storage:3"); @@ -825,6 +833,7 @@ TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for EXPECT_FALSE(bucketExistsThatHasNode(100, 1)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, 
node_down_copies_get_in_sync) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); @@ -842,6 +851,7 @@ TEST_F(LegacyBucketDBUpdaterTest, node_down_copies_get_in_sync) { dumpBucket(bid)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) { lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1"); setSystemState(systemState); @@ -860,6 +870,7 @@ TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) { EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, bit_change) { std::vector<document::BucketId> bucketlist; @@ -959,6 +970,7 @@ TEST_F(LegacyBucketDBUpdaterTest, bit_change) { } }; +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) { ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5)); @@ -1002,6 +1014,7 @@ TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) { EXPECT_EQ(size_t(2), _sender.commands().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, recheck_node) { ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5)); @@ -1040,6 +1053,7 @@ TEST_F(LegacyBucketDBUpdaterTest, recheck_node) { EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) { enableDistributorClusterState("distributor:1 storage:1"); @@ -1103,6 +1117,7 @@ TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) { dumpBucket(document::BucketId(16, 2))); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) { enableDistributorClusterState("distributor:1 storage:2"); @@ -1149,6 +1164,7 @@ TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) { 
dumpBucket(document::BucketId(16, 1))); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest /** * Test that NotifyBucketChange received while there's a pending cluster state * waits until the cluster state has been enabled as current before it sends off @@ -1196,6 +1212,7 @@ TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket } } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, merge_reply) { enableDistributorClusterState("distributor:1 storage:3"); @@ -1238,6 +1255,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply) { dumpBucket(document::BucketId(16, 1234))); }; +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) { enableDistributorClusterState("distributor:1 storage:3"); std::vector<api::MergeBucketCommand::Node> nodes; @@ -1280,6 +1298,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) { dumpBucket(document::BucketId(16, 1234))); }; +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { enableDistributorClusterState("distributor:1 storage:3"); std::vector<api::MergeBucketCommand::Node> nodes; @@ -1322,7 +1341,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { dumpBucket(document::BucketId(16, 1234))); }; - +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, flush) { enableDistributorClusterState("distributor:1 storage:3"); _sender.clear(); @@ -1401,6 +1420,7 @@ LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged( return ost.str(); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) { EXPECT_EQ(getNodeList({0, 1, 2}), getSentNodes("cluster:d", @@ -1498,6 +1518,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) { "distributor:3 storage:3 .1.s:m")); }; +// TODO 
STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) { DistributorMessageSenderStub sender; @@ -1536,6 +1557,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) { EXPECT_EQ(3, (int)pendingTransition.results().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) { std::string config(getDistConfig6Nodes4Groups()); config += "distributor_auto_ownership_transfer_on_whole_group_down true\n"; @@ -1555,6 +1577,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) { "distributor:6 .2.s:d storage:6")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) { std::string config(getDistConfig6Nodes4Groups()); config += "distributor_auto_ownership_transfer_on_whole_group_down false\n"; @@ -1566,6 +1589,8 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_h "distributor:6 .2.s:d .3.s:d storage:6")); } +namespace { + void parseInputData(const std::string& data, uint64_t timestamp, @@ -1640,6 +1665,8 @@ struct BucketDumper : public BucketDatabase::EntryProcessor } }; +} + std::string LegacyBucketDBUpdaterTest::mergeBucketLists( const lib::ClusterState& oldState, @@ -1708,6 +1735,7 @@ LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData, includeBucketInfo); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) { // Simple initializing case - ask all nodes for info EXPECT_EQ( @@ -1747,6 +1775,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) { mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { // Node went from initializing to up and 
non-invalid bucket changed. EXPECT_EQ( @@ -1759,6 +1788,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) { document::BucketId bucket(16, 3); lib::ClusterState stateBefore("distributor:1 storage:1"); @@ -1788,6 +1818,7 @@ TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_cur EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { document::BucketId bucket(16, 3); lib::ClusterState stateBefore("distributor:1 storage:1"); @@ -1815,6 +1846,7 @@ TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pen EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest /* * If we get a distribution config change, it's important that cluster states that * arrive after this--but _before_ the pending cluster state has finished--must trigger @@ -1864,6 +1896,7 @@ TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_dis EXPECT_EQ(size_t(0), _sender.commands().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) { ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20)); _sender.clear(); @@ -1913,6 +1946,7 @@ std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) { } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) { setDistribution(getDistConfig3Nodes1Group()); @@ -1932,6 +1966,7 @@ TEST_F(LegacyBucketDBUpdaterTest, 
changed_distribution_config_does_not_elide_buc })); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) { getClock().setAbsoluteTimeInSeconds(101234); lib::ClusterState stateBefore("distributor:1 storage:1"); @@ -1947,6 +1982,7 @@ TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_ti EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) { { lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i"); @@ -2035,6 +2071,7 @@ LegacyBucketDBUpdaterTest::getSentNodesWithPreemption( using nodeVec = std::vector<uint16_t>; +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest /* * If we don't carry over the set of nodes that we need to fetch from, * a naive comparison between the active state and the new state will @@ -2051,6 +2088,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_ "version:3 distributor:6 storage:6")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) { EXPECT_EQ( expandNodeVec({2, 3}), @@ -2061,6 +2099,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over "version:3 distributor:6 storage:6")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) { EXPECT_EQ( expandNodeVec({2}), @@ -2071,6 +2110,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched "version:3 distributor:6 storage:6")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) { EXPECT_EQ( nodeVec{}, @@ -2081,6 +2121,7 @@ 
TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_stat "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again. } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) { // Even though 100 nodes are preempted, not all of these should be part // of the request afterwards when only 6 are part of the state. @@ -2093,6 +2134,7 @@ TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) { "version:3 distributor:6 storage:6")); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) { lib::ClusterState stateBefore( "version:1 distributor:6 storage:6 .1.t:1234"); @@ -2107,6 +2149,7 @@ TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_sta EXPECT_EQ(size_t(0), _sender.commands().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest (despite being disabled) // XXX test currently disabled since distribution config currently isn't used // at all in order to deduce the set of nodes to send to. This might not matter // in practice since it is assumed that the cluster state matching the new @@ -2128,6 +2171,7 @@ TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to EXPECT_EQ((nodeVec{0, 1, 2}), getSendSet()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest /** * Test scenario where a cluster is downsized by removing a subset of the nodes * from the distribution configuration. 
The system must be able to deal with @@ -2172,6 +2216,7 @@ TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing EXPECT_EQ(expandNodeVec({0, 1}), getSendSet()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) { auto fixture = createPendingStateFixtureForStateChange( "distributor:2 storage:2", "distributor:1 storage:2"); @@ -2182,6 +2227,7 @@ TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_tran EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) { auto fixture = createPendingStateFixtureForStateChange( "distributor:2 storage:2", "distributor:2 storage:1"); @@ -2192,18 +2238,21 @@ TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) { auto fixture = createPendingStateFixtureForDistributionChange( "distributor:2 storage:2"); EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) { ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2))); EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) { ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2))); ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, 
messageCount(1))); @@ -2211,6 +2260,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_st EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) { lib::ClusterState state("distributor:2 storage:2"); ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1)); @@ -2223,6 +2273,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_confi EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) { _sender.clear(); lib::ClusterState state("distributor:2 storage:2"); @@ -2236,6 +2287,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_trans EXPECT_EQ(uint64_t(8000), lastTransitionTimeInMillis()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest /* * Brief reminder on test DSL for checking bucket merge operations: * @@ -2259,31 +2311,37 @@ TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_do "0:5/1/2/3|1:5/7/8/9", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) { EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"), mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) { EXPECT_EQ(std::string("5:0/1/2/3/t|"), mergeBucketLists("", "0:5/1/2/3", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) { EXPECT_EQ(std::string("5:0/1/2/3/t|"), mergeBucketLists("0:5/1/2/3", 
"0:5/1/2/3", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) { EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"), mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) { EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"), mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) { // This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted // in that _all_ content nodes are considered outdated when distributor changes take place, @@ -2299,6 +2357,7 @@ TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_ "0:5/1/2/3|1:5/7/8/9", true)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest // TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475 TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) { std::string distConfig(getDistConfig6Nodes2Groups()); @@ -2368,6 +2427,7 @@ void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) { } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) { getBucketDBUpdater().set_stale_reads_enabled(true); @@ -2409,6 +2469,7 @@ TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_own }); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) { constexpr uint32_t 
n_buckets = 10; // No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will @@ -2420,6 +2481,7 @@ TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_r EXPECT_EQ(size_t(0), read_only_global_db().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) { getBucketDBUpdater().set_stale_reads_enabled(false); @@ -2465,6 +2527,7 @@ void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transiti _sender.clear(); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; @@ -2485,6 +2548,7 @@ TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; @@ -2497,6 +2561,7 @@ TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_act EXPECT_EQ(uint64_t(0), read_only_global_db().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; @@ -2511,6 +2576,7 @@ TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_ma EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size()); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) { 
getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; @@ -2525,6 +2591,7 @@ TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatchin ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5)); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; @@ -2541,6 +2608,7 @@ TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending EXPECT_EQ(size_t(0), _sender.replies().size()); } +// TODO STRIPE disabled benchmark tests are NOT migrated to new test suite TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) { // Need to trigger an initial edge to complete first bucket scan ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), @@ -2659,6 +2727,7 @@ TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_ fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets); } +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) { auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d"); auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m"); @@ -2684,7 +2753,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_ EXPECT_TRUE(state == nullptr); } -struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { +struct LegacyBucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { lib::ClusterState empty_state; std::shared_ptr<lib::ClusterState> initial_baseline; std::shared_ptr<lib::ClusterState> initial_default; @@ -2692,7 +2761,7 @@ 
struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { Bucket default_bucket; Bucket global_bucket; - BucketDBUpdaterSnapshotTest() + LegacyBucketDBUpdaterSnapshotTest() : LegacyBucketDBUpdaterTest(), empty_state(), initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")), @@ -2703,7 +2772,7 @@ struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { global_bucket(FixedBucketSpaces::global_space(), BucketId(16, 1234)) { } - ~BucketDBUpdaterSnapshotTest() override; + ~LegacyBucketDBUpdaterSnapshotTest() override; void SetUp() override { LegacyBucketDBUpdaterTest::SetUp(); @@ -2730,19 +2799,22 @@ struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { } }; -BucketDBUpdaterSnapshotTest::~BucketDBUpdaterSnapshotTest() = default; +LegacyBucketDBUpdaterSnapshotTest::~LegacyBucketDBUpdaterSnapshotTest() = default; -TEST_F(BucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) { auto rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket); EXPECT_FALSE(rs.is_routable()); } -TEST_F(BucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) { auto rs = getBucketDBUpdater().read_snapshot_for_bucket(global_bucket); EXPECT_FALSE(rs.is_routable()); } -TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) { set_cluster_state_bundle(initial_bundle); // State currently pending, empty initial state is active @@ -2772,7 +2844,8 @@ 
TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_st EXPECT_FALSE(global_rs.context().has_pending_state_transition()); } -TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) { constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, @@ -2784,7 +2857,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_re n_buckets); } -TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) { ASSERT_NO_FATAL_FAILURE( trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, "version:2 distributor:2 .0.s:d storage:4", 0, 0)); @@ -2794,7 +2868,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bu EXPECT_FALSE(def_rs.is_routable()); } -TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) { constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, @@ -2805,7 +2880,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_onl n_buckets); } -TEST_F(BucketDBUpdaterSnapshotTest, 
snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) { +// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest +TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) { getBucketDBUpdater().set_stale_reads_enabled(false); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp index 20ffd216e3d..293e5d02d11 100644 --- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp +++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp @@ -149,7 +149,7 @@ Fixture::Fixture() _clock.setAbsoluteTimeInSeconds(1); // Have to set clock in compReg before constructing tracker, or it'll // flip out and die on an explicit nullptr check. - _tracker = std::make_unique<PendingMessageTracker>(_compReg); + _tracker = std::make_unique<PendingMessageTracker>(_compReg, 0); } Fixture::~Fixture() = default; @@ -160,7 +160,7 @@ TEST_F(PendingMessageTrackerTest, simple) { framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); clock.setAbsoluteTimeInSeconds(1); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 0); auto remove = std::make_shared<api::RemoveCommand>( makeDocumentBucket(document::BucketId(16, 1234)), @@ -217,14 +217,14 @@ TEST_F(PendingMessageTrackerTest, start_page) { StorageComponentRegisterImpl compReg; framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 3); { std::ostringstream ost; - tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages")); + tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages3")); EXPECT_THAT(ost.str(), HasSubstr( - "<h1>Pending messages to storage nodes</h1>\n" + "<h1>Pending messages to storage nodes (stripe 
3)</h1>\n" "View:\n" "<ul>\n" "<li><a href=\"?order=bucket\">Group by bucket</a></li>" @@ -237,7 +237,7 @@ TEST_F(PendingMessageTrackerTest, multiple_messages) { framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); clock.setAbsoluteTimeInSeconds(1); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 0); insertMessages(tracker); @@ -332,7 +332,7 @@ TEST_F(PendingMessageTrackerTest, get_pending_message_types) { framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); clock.setAbsoluteTimeInSeconds(1); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 0); document::BucketId bid(16, 1234); auto remove = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bid), @@ -364,7 +364,7 @@ TEST_F(PendingMessageTrackerTest, has_pending_message) { framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); clock.setAbsoluteTimeInSeconds(1); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 0); document::BucketId bid(16, 1234); EXPECT_FALSE(tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::REMOVE_ID)); @@ -407,7 +407,7 @@ TEST_F(PendingMessageTrackerTest, get_all_messages_for_single_bucket) { framework::defaultimplementation::FakeClock clock; compReg.setClock(clock); clock.setAbsoluteTimeInSeconds(1); - PendingMessageTracker tracker(compReg); + PendingMessageTracker tracker(compReg, 0); insertMessages(tracker); diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp new file mode 100644 index 00000000000..01f7d5a4f0a --- /dev/null +++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp @@ -0,0 +1,2665 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/storageapi/message/persistence.h> +#include <vespa/storage/distributor/top_level_bucket_db_updater.h> +#include <vespa/storage/distributor/bucket_space_distribution_context.h> +#include <vespa/storage/distributor/distributormetricsset.h> +#include <vespa/storage/distributor/pending_bucket_space_db_transition.h> +#include <vespa/storage/distributor/outdated_nodes_map.h> +#include <vespa/storage/storageutil/distributorstatecache.h> +#include <tests/distributor/top_level_distributor_test_util.h> +#include <vespa/document/test/make_document_bucket.h> +#include <vespa/document/test/make_bucket_space.h> +#include <vespa/document/bucket/fixed_bucket_spaces.h> +#include <vespa/metrics/updatehook.h> +#include <vespa/storage/distributor/simpleclusterinformation.h> +#include <vespa/storage/distributor/top_level_distributor.h> +#include <vespa/storage/distributor/distributor_stripe.h> +#include <vespa/storage/distributor/distributor_bucket_space.h> +#include <vespa/vespalib/gtest/gtest.h> +#include <vespa/vespalib/text/stringtokenizer.h> +#include <sstream> +#include <iomanip> + +using namespace storage::api; +using namespace storage::lib; +using document::test::makeDocumentBucket; +using document::test::makeBucketSpace; +using document::BucketSpace; +using document::FixedBucketSpaces; +using document::BucketId; +using document::Bucket; + +using namespace ::testing; + +namespace storage::distributor { + +class TopLevelBucketDBUpdaterTest : public Test, + public TopLevelDistributorTestUtil +{ +public: + using OutdatedNodesMap = dbtransition::OutdatedNodesMap; + + TopLevelBucketDBUpdaterTest(); + ~TopLevelBucketDBUpdaterTest() override; + + std::vector<document::BucketSpace> _bucket_spaces; + + size_t message_count(size_t messagesPerBucketSpace) const { + return messagesPerBucketSpace * _bucket_spaces.size(); + } + + using NodeCount = int; + using Redundancy = int; + + void SetUp() override { + create_links(); + _bucket_spaces = bucket_spaces(); + // Disable 
deferred activation by default (at least for now) to avoid breaking the entire world. + bucket_db_updater().set_stale_reads_enabled(false); + setup_distributor(Redundancy(2), NodeCount(10), "cluster:d"); + }; + + void TearDown() override { + close(); + } + + std::shared_ptr<RequestBucketInfoReply> make_fake_bucket_reply( + const lib::ClusterState& state, + const RequestBucketInfoCommand& cmd, + int storageIndex, + uint32_t bucketCount, + uint32_t invalidBucketCount = 0) + { + auto sreply = std::make_shared<RequestBucketInfoReply>(cmd); + sreply->setAddress(storage_address(storageIndex)); + + auto& vec = sreply->getBucketInfo(); + + for (uint32_t i=0; i<bucketCount + invalidBucketCount; i++) { + document::BucketId bucket(16, i); + if (!distributor_bucket_space(bucket).owns_bucket_in_state(state, bucket)) { + continue; + } + + std::vector<uint16_t> nodes; + distributor_bucket_space(bucket).getDistribution().getIdealNodes( + lib::NodeType::STORAGE, state, bucket, nodes); + + for (uint32_t j = 0; j < nodes.size(); ++j) { + if (nodes[j] == storageIndex) { + if (i >= bucketCount) { + vec.push_back(api::RequestBucketInfoReply::Entry( + document::BucketId(16, i), + api::BucketInfo())); + } else { + vec.push_back(api::RequestBucketInfoReply::Entry( + document::BucketId(16, i), + api::BucketInfo(10,1,1))); + } + } + } + } + + return sreply; + } + + void fake_bucket_reply(const lib::ClusterState &state, + const api::StorageCommand &cmd, + uint32_t bucket_count, + uint32_t invalid_bucket_count = 0) + { + ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO); + const api::StorageMessageAddress& address(*cmd.getAddress()); + bucket_db_updater().onRequestBucketInfoReply( + make_fake_bucket_reply(state, + dynamic_cast<const RequestBucketInfoCommand &>(cmd), + address.getIndex(), + bucket_count, + invalid_bucket_count)); + } + + void send_fake_reply_for_single_bucket_request( + const api::RequestBucketInfoCommand& rbi) + { + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + const 
document::BucketId& bucket(rbi.getBuckets()[0]); + + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + reply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true))); + stripe_of_bucket(bucket).bucket_db_updater().onRequestBucketInfoReply(reply); + } + + std::string verify_bucket(document::BucketId id, const lib::ClusterState& state) { + BucketDatabase::Entry entry = get_bucket(id); + if (!entry.valid()) { + return vespalib::make_string("%s doesn't exist in DB", id.toString().c_str()); + } + + std::vector<uint16_t> nodes; + distributor_bucket_space(id).getDistribution().getIdealNodes( + lib::NodeType::STORAGE, state, document::BucketId(id), nodes); + + if (nodes.size() != entry->getNodeCount()) { + return vespalib::make_string("Bucket Id %s has %d nodes in " + "ideal state, but has only %d in DB", + id.toString().c_str(), + (int)nodes.size(), + (int)entry->getNodeCount()); + } + + for (uint32_t i = 0; i<nodes.size(); i++) { + bool found = false; + + for (uint32_t j = 0; j<entry->getNodeCount(); j++) { + if (nodes[i] == entry->getNodeRef(j).getNode()) { + found = true; + } + } + + if (!found) { + return vespalib::make_string( + "Bucket Id %s has no copy from node %d", + id.toString().c_str(), + nodes[i]); + } + } + + return ""; + } + + struct OrderByIncreasingNodeIndex { + template <typename T> + bool operator()(const T& lhs, const T& rhs) { + return (lhs->getAddress()->getIndex() + < rhs->getAddress()->getIndex()); + } + }; + + void sort_sent_messages_by_index(DistributorMessageSenderStub& sender, + size_t sortFromOffset = 0) + { + std::sort(sender.commands().begin() + sortFromOffset, + sender.commands().end(), + OrderByIncreasingNodeIndex()); + } + + void set_cluster_state(const lib::ClusterState& state) { + const size_t size_before_state = _sender.commands().size(); + bucket_db_updater().onSetSystemState(std::make_shared<api::SetSystemStateCommand>(state)); + // A lot of test logic 
has the assumption that all messages sent as a + // result of cluster state changes will be in increasing index order + // (for simplicity, not because this is required for correctness). + // Only sort the messages that arrived as a result of the state, don't + // jumble the sorting with any existing messages. + sort_sent_messages_by_index(_sender, size_before_state); + } + + void set_cluster_state_bundle(const lib::ClusterStateBundle& state) { + const size_t size_before_state = _sender.commands().size(); + bucket_db_updater().onSetSystemState(std::make_shared<api::SetSystemStateCommand>(state)); + sort_sent_messages_by_index(_sender, size_before_state); + } + + void set_cluster_state(const vespalib::string& state_str) { + set_cluster_state(lib::ClusterState(state_str)); + } + + bool activate_cluster_state_version(uint32_t version) { + return bucket_db_updater().onActivateClusterStateVersion( + std::make_shared<api::ActivateClusterStateVersionCommand>(version)); + } + + void assert_has_activate_cluster_state_reply_with_actual_version(uint32_t version) { + ASSERT_EQ(size_t(1), _sender.replies().size()); + auto* response = dynamic_cast<api::ActivateClusterStateVersionReply*>(_sender.replies().back().get()); + ASSERT_TRUE(response != nullptr); + ASSERT_EQ(version, response->actualVersion()); + _sender.clear(); + } + + void complete_bucket_info_gathering(const lib::ClusterState& state, + size_t expected_msgs, + uint32_t bucket_count = 1, + uint32_t invalid_bucket_count = 0) + { + ASSERT_EQ(expected_msgs, _sender.commands().size()); + + for (uint32_t i = 0; i < _sender.commands().size(); i++) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), + bucket_count, invalid_bucket_count)); + } + } + + api::StorageMessageAddress storage_address(uint16_t node) { + static vespalib::string _storage("storage"); + return api::StorageMessageAddress(&_storage, lib::NodeType::STORAGE, node); + } + + void assert_correct_buckets(int num_buckets, const std::string& 
state_str) { + lib::ClusterState state(state_str); + for (int i = 0; i < num_buckets; i++) { + ASSERT_EQ(get_ideal_str(document::BucketId(16, i), state), + get_nodes(document::BucketId(16, i))); + } + } + + void set_distribution(const std::string& dist_config) { + trigger_distribution_change(std::make_shared<lib::Distribution>(dist_config)); + } + + void verify_invalid(document::BucketId id, int storageNode) { + BucketDatabase::Entry entry = get_bucket(id); + ASSERT_TRUE(entry.valid()); + bool found = false; + for (uint32_t j = 0; j < entry->getNodeCount(); j++) { + if (entry->getNodeRef(j).getNode() == storageNode) { + ASSERT_FALSE(entry->getNodeRef(j).valid()); + found = true; + } + } + + ASSERT_TRUE(found); + } + + void set_storage_nodes(uint32_t numStorageNodes) { + _sender.clear(); + set_cluster_state(lib::ClusterState(vespalib::make_string("distributor:1 storage:%d", numStorageNodes))); + + for (uint32_t i=0; i< message_count(numStorageNodes); i++) { + ASSERT_EQ(_sender.command(i)->getType(), MessageType::REQUESTBUCKETINFO); + + const api::StorageMessageAddress *address = _sender.command(i)->getAddress(); + ASSERT_EQ(i / _bucket_spaces.size(), address->getIndex()); + } + } + + bool bucket_has_node(document::BucketId id, uint16_t node) const { + BucketDatabase::Entry entry = get_bucket(id); + assert(entry.valid()); + + for (uint32_t j = 0; j < entry->getNodeCount(); ++j) { + if (entry->getNodeRef(j).getNode() == node) { + return true; + } + } + return false; + } + + bool bucket_exists_that_has_node(int bucket_count, uint16_t node) const { + for (int i = 1; i < bucket_count; ++i) { + if (bucket_has_node(document::BucketId(16, i), node)) { + return true; + } + } + return false; + } + + std::string dump_bucket(const document::BucketId& id) const { + return get_bucket(id).toString(); + } + + void initialize_nodes_and_buckets(uint32_t num_storage_nodes, uint32_t num_buckets) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(num_storage_nodes)); + + vespalib::string 
state(vespalib::make_string("distributor:1 storage:%d", num_storage_nodes)); + lib::ClusterState new_state(state); + + for (uint32_t i = 0; i < message_count(num_storage_nodes); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(new_state, *_sender.command(i), num_buckets)); + } + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(num_buckets, state)); + } + + void set_and_enable_cluster_state(const lib::ClusterState& state, uint32_t expected_msgs, uint32_t n_buckets) { + _sender.clear(); + set_cluster_state(state); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, expected_msgs, n_buckets)); + } + + void complete_state_transition_in_seconds(const std::string& stateStr, + uint32_t seconds, + uint32_t expectedMsgs) + { + _sender.clear(); + lib::ClusterState state(stateStr); + set_cluster_state(state); + fake_clock().addSecondsToTime(seconds); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, expectedMsgs)); + } + + uint64_t last_transition_time_in_millis() { + { + // Force stripe metrics to be aggregated into total. 
+ std::mutex l; + distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l)); + } + return uint64_t(total_distributor_metrics().stateTransitionTime.getLast()); + } + + ClusterInformation::CSP create_cluster_info(const std::string& clusterStateString) { + lib::ClusterState baseline_cluster_state(clusterStateString); + lib::ClusterStateBundle cluster_state_bundle(baseline_cluster_state); + auto cluster_info = std::make_shared<SimpleClusterInformation>( + _distributor->node_identity().node_index(), + cluster_state_bundle, + "ui"); + enable_distributor_cluster_state(clusterStateString); + return cluster_info; + } + + struct PendingClusterStateFixture { + DistributorMessageSenderStub sender; + framework::defaultimplementation::FakeClock clock; + std::unique_ptr<PendingClusterState> state; + + PendingClusterStateFixture( + TopLevelBucketDBUpdaterTest& owner, + const std::string& old_cluster_state, + const std::string& new_cluster_state) + { + auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(new_cluster_state)); + auto cluster_info = owner.create_cluster_info(old_cluster_state); + OutdatedNodesMap outdated_nodes_map; + state = PendingClusterState::createForClusterStateChange( + clock, cluster_info, sender, + owner.top_level_bucket_space_repo(), + cmd, outdated_nodes_map, api::Timestamp(1)); + } + + PendingClusterStateFixture( + TopLevelBucketDBUpdaterTest& owner, + const std::string& old_cluster_state) + { + auto cluster_info = owner.create_cluster_info(old_cluster_state); + state = PendingClusterState::createForDistributionChange( + clock, cluster_info, sender, owner.top_level_bucket_space_repo(), api::Timestamp(1)); + } + }; + + std::unique_ptr<PendingClusterStateFixture> create_pending_state_fixture_for_state_change( + const std::string& oldClusterState, + const std::string& newClusterState) + { + return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState, newClusterState); + } + + 
std::unique_ptr<PendingClusterStateFixture> create_pending_state_fixture_for_distribution_change( + const std::string& oldClusterState) + { + return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState); + } + + std::string get_sent_nodes(const std::string& old_cluster_state, + const std::string& new_cluster_state); + + std::string get_sent_nodes_distribution_changed(const std::string& old_cluster_state); + + std::string get_node_list(const std::vector<uint16_t>& nodes, size_t count); + std::string get_node_list(const std::vector<uint16_t>& nodes); + + std::string merge_bucket_lists(const lib::ClusterState& old_state, + const std::string& existing_data, + const lib::ClusterState& new_state, + const std::string& new_data, + bool include_bucket_info = false); + + std::string merge_bucket_lists(const std::string& existingData, + const std::string& newData, + bool includeBucketInfo = false); + + std::vector<uint16_t> get_send_set() const; + + std::vector<uint16_t> get_sent_nodes_with_preemption( + const std::string& old_cluster_state, + uint32_t expected_old_state_messages, + const std::string& preempted_cluster_state, + const std::string& new_cluster_state); + + std::vector<uint16_t> expand_node_vec(const std::vector<uint16_t>& nodes); + + void trigger_completed_but_not_yet_activated_transition( + vespalib::stringref initial_state_str, + uint32_t initial_buckets, + uint32_t initial_expected_msgs, + vespalib::stringref pending_state_str, + uint32_t pending_buckets, + uint32_t pending_expected_msgs); + + const DistributorBucketSpaceRepo& mutable_repo(DistributorStripe& s) const noexcept { + return s.getBucketSpaceRepo(); + } + // Note: not calling this "immutable_repo" since it may actually be modified by the pending + // cluster state component (just not by operations), so it would not have the expected semantics. 
+ const DistributorBucketSpaceRepo& read_only_repo(DistributorStripe& s) const noexcept { + return s.getReadOnlyBucketSpaceRepo(); + } + + const BucketDatabase& mutable_default_db(DistributorStripe& s) const noexcept { + return mutable_repo(s).get(FixedBucketSpaces::default_space()).getBucketDatabase(); + } + const BucketDatabase& mutable_global_db(DistributorStripe& s) const noexcept { + return mutable_repo(s).get(FixedBucketSpaces::global_space()).getBucketDatabase(); + } + const BucketDatabase& read_only_default_db(DistributorStripe& s) const noexcept { + return read_only_repo(s).get(FixedBucketSpaces::default_space()).getBucketDatabase(); + } + const BucketDatabase& read_only_global_db(DistributorStripe& s) const noexcept { + return read_only_repo(s).get(FixedBucketSpaces::global_space()).getBucketDatabase(); + } + + void set_stale_reads_enabled(bool enabled) { + for (auto* s : distributor_stripes()) { + s->bucket_db_updater().set_stale_reads_enabled(enabled); + } + bucket_db_updater().set_stale_reads_enabled(enabled); + } + + size_t mutable_default_dbs_size() const { + size_t total = 0; + for (auto* s : distributor_stripes()) { + total += mutable_default_db(*s).size(); + } + return total; + } + + size_t mutable_global_dbs_size() const { + size_t total = 0; + for (auto* s : distributor_stripes()) { + total += mutable_global_db(*s).size(); + } + return total; + } + + size_t read_only_default_dbs_size() const { + size_t total = 0; + for (auto* s : distributor_stripes()) { + total += read_only_default_db(*s).size(); + } + return total; + } + + size_t read_only_global_dbs_size() const { + size_t total = 0; + for (auto* s : distributor_stripes()) { + total += read_only_global_db(*s).size(); + } + return total; + } + +}; + +TopLevelBucketDBUpdaterTest::TopLevelBucketDBUpdaterTest() + : TopLevelDistributorTestUtil(), + _bucket_spaces() +{ +} + +TopLevelBucketDBUpdaterTest::~TopLevelBucketDBUpdaterTest() = default; + +namespace { + +std::string 
dist_config_6_nodes_across_2_groups() { + return ("redundancy 2\n" + "group[3]\n" + "group[0].name \"invalid\"\n" + "group[0].index \"invalid\"\n" + "group[0].partitions 1|*\n" + "group[0].nodes[0]\n" + "group[1].name rack0\n" + "group[1].index 0\n" + "group[1].nodes[3]\n" + "group[1].nodes[0].index 0\n" + "group[1].nodes[1].index 1\n" + "group[1].nodes[2].index 2\n" + "group[2].name rack1\n" + "group[2].index 1\n" + "group[2].nodes[3]\n" + "group[2].nodes[0].index 3\n" + "group[2].nodes[1].index 4\n" + "group[2].nodes[2].index 5\n"); +} + +std::string dist_config_6_nodes_across_4_groups() { + return ("redundancy 2\n" + "group[4]\n" + "group[0].name \"invalid\"\n" + "group[0].index \"invalid\"\n" + "group[0].partitions 1|*\n" + "group[0].nodes[0]\n" + "group[1].name rack0\n" + "group[1].index 0\n" + "group[1].nodes[2]\n" + "group[1].nodes[0].index 0\n" + "group[1].nodes[1].index 1\n" + "group[2].name rack1\n" + "group[2].index 1\n" + "group[2].nodes[2]\n" + "group[2].nodes[0].index 2\n" + "group[2].nodes[1].index 3\n" + "group[3].name rack2\n" + "group[3].index 2\n" + "group[3].nodes[2]\n" + "group[3].nodes[0].index 4\n" + "group[3].nodes[1].index 5\n"); +} + +std::string dist_config_3_nodes_in_1_group() { + return ("redundancy 2\n" + "group[2]\n" + "group[0].name \"invalid\"\n" + "group[0].index \"invalid\"\n" + "group[0].partitions 1|*\n" + "group[0].nodes[0]\n" + "group[1].name rack0\n" + "group[1].index 0\n" + "group[1].nodes[3]\n" + "group[1].nodes[0].index 0\n" + "group[1].nodes[1].index 1\n" + "group[1].nodes[2].index 2\n"); +} + +std::string +make_string_list(std::string s, uint32_t count) +{ + std::ostringstream ost; + for (uint32_t i = 0; i < count; ++i) { + if (i > 0) { + ost << ","; + } + ost << s; + } + return ost.str(); +} + +std::string +make_request_bucket_info_strings(uint32_t count) +{ + return make_string_list("Request bucket info", count); +} + +} + + +std::string +TopLevelBucketDBUpdaterTest::get_node_list(const std::vector<uint16_t>& nodes, 
size_t count) +{ + std::ostringstream ost; + bool first = true; + for (const auto node : nodes) { + for (uint32_t i = 0; i < count; ++i) { + if (!first) { + ost << ","; + } + ost << node; + first = false; + } + } + return ost.str(); +} + +std::string +TopLevelBucketDBUpdaterTest::get_node_list(const std::vector<uint16_t>& nodes) +{ + return get_node_list(nodes, _bucket_spaces.size()); +} + +void +TopLevelBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition( + vespalib::stringref initial_state_str, + uint32_t initial_buckets, + uint32_t initial_expected_msgs, + vespalib::stringref pending_state_str, + uint32_t pending_buckets, + uint32_t pending_expected_msgs) +{ + lib::ClusterState initial_state(initial_state_str); + set_cluster_state(initial_state); + ASSERT_EQ(message_count(initial_expected_msgs), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering( + initial_state, message_count(initial_expected_msgs), initial_buckets)); + _sender.clear(); + + lib::ClusterState pending_state(pending_state_str); // Ownership change + set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true)); + ASSERT_EQ(message_count(pending_expected_msgs), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering( + pending_state, message_count(pending_expected_msgs), pending_buckets)); + _sender.clear(); +} + +TEST_F(TopLevelBucketDBUpdaterTest, normal_usage) { + set_cluster_state(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3")); + + ASSERT_EQ(message_count(3), _sender.commands().size()); + + // Ensure distribution hash is set correctly + ASSERT_EQ(_component->getDistribution()->getNodeGraph().getDistributionConfigHash(), + dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(0)).getDistributionHash()); + + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), + *_sender.command(0), 10)); + + _sender.clear(); + + // Optimization for 
not refetching unneeded data after cluster state + // change is only implemented after completion of previous cluster state + set_cluster_state("distributor:2 .0.s:i storage:3"); + + ASSERT_EQ(message_count(3), _sender.commands().size()); + // Expect reply of first set SystemState request. + ASSERT_EQ(size_t(1), _sender.replies().size()); + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering( + lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), + message_count(3), 10)); + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(10, "distributor:2 storage:3")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, distributor_change) { + int num_buckets = 100; + + // First sends request + set_cluster_state("distributor:2 .0.s:i .1.s:i storage:3"); + ASSERT_EQ(message_count(3), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), + message_count(3), num_buckets)); + _sender.clear(); + + // No change from initializing to up (when done with last job) + set_cluster_state("distributor:2 storage:3"); + ASSERT_EQ(size_t(0), _sender.commands().size()); + _sender.clear(); + + // Adding node. No new read requests, but buckets thrown + set_cluster_state("distributor:3 storage:3"); + ASSERT_EQ(size_t(0), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(num_buckets, "distributor:3 storage:3")); + _sender.clear(); + + // Removing distributor. Need to refetch new data from all nodes. 
+ set_cluster_state("distributor:2 storage:3"); + ASSERT_EQ(message_count(3), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:2 storage:3"), + message_count(3), num_buckets)); + _sender.clear(); + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(num_buckets, "distributor:2 storage:3")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, distributor_change_with_grouping) { + set_distribution(dist_config_6_nodes_across_2_groups()); + int numBuckets = 100; + + set_cluster_state("distributor:6 storage:6"); + ASSERT_EQ(message_count(6), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:6 storage:6"), + message_count(6), numBuckets)); + _sender.clear(); + + // Distributor going down in other group, no change + set_cluster_state("distributor:6 .5.s:d storage:6"); + ASSERT_EQ(size_t(0), _sender.commands().size()); + _sender.clear(); + + set_cluster_state("distributor:6 storage:6"); + ASSERT_EQ(size_t(0), _sender.commands().size()); + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(numBuckets, "distributor:6 storage:6")); + _sender.clear(); + + // Unchanged grouping cause no change. + set_distribution(dist_config_6_nodes_across_2_groups()); + ASSERT_EQ(size_t(0), _sender.commands().size()); + + // Changed grouping cause change + set_distribution(dist_config_6_nodes_across_4_groups()); + + ASSERT_EQ(message_count(6), _sender.commands().size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, normal_usage_initializing) { + set_cluster_state("distributor:1 .0.s:i storage:1 .0.s:i"); + + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + // Not yet passing on system state. 
+ ASSERT_EQ(size_t(0), _sender_down.commands().size()); + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"), + _bucket_spaces.size(), 10, 10)); + + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(10, "distributor:1 storage:1")); + + for (int i = 10; i < 20; ++i) { + ASSERT_NO_FATAL_FAILURE(verify_invalid(document::BucketId(16, i), 0)); + } + + // Pass on cluster state and recheck buckets now. + ASSERT_EQ(size_t(1), _sender_down.commands().size()); + + _sender.clear(); + _sender_down.clear(); + + set_cluster_state("distributor:1 .0.s:i storage:1"); + + // Send a new request bucket info up. + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"), + _bucket_spaces.size(), 20)); + + // Pass on cluster state and recheck buckets now. + ASSERT_EQ(size_t(1), _sender_down.commands().size()); + + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(20, "distributor:1 storage:1")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, failed_request_bucket_info) { + set_cluster_state("distributor:1 .0.s:i storage:1"); + + // 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate. + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + { + for (uint32_t i = 0; i < _bucket_spaces.size(); ++i) { + auto reply = make_fake_bucket_reply(lib::ClusterState("distributor:1 .0.s:i storage:1"), + dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(i)), + 0, + 10); + reply->setResult(api::ReturnCode::NOT_CONNECTED); + bucket_db_updater().onRequestBucketInfoReply(reply); + } + + // Trigger that delayed message is sent + fake_clock().addSecondsToTime(10); + bucket_db_updater().resend_delayed_messages(); + } + + // Should be resent. 
+ ASSERT_EQ(make_request_bucket_info_strings(message_count(2)), _sender.getCommands()); + + ASSERT_EQ(size_t(0), _sender_down.commands().size()); + + for (uint32_t i = 0; i < _bucket_spaces.size(); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 .0.s:i storage:1"), + *_sender.command(_bucket_spaces.size() + i), 10)); + } + + for (int i=0; i<10; i++) { + EXPECT_EQ("", + verify_bucket(document::BucketId(16, i), + lib::ClusterState("distributor:1 storage:1"))); + } + + // Set system state should now be passed on + EXPECT_EQ("Set system state", _sender_down.getCommands()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, down_while_init) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); + + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 storage:3"), + *_sender.command(0), 5)); + + set_cluster_state("distributor:1 storage:3 .1.s:d"); + + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 storage:3"), + *_sender.command(2), 5)); + + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 storage:3"), + *_sender.command(1), 5)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, node_down) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); + enable_distributor_cluster_state("distributor:1 storage:3"); + + for (int i = 1; i < 100; ++i) { + add_ideal_nodes(document::BucketId(16, i)); + } + + EXPECT_TRUE(bucket_exists_that_has_node(100, 1)); + + set_cluster_state("distributor:1 storage:3 .1.s:d"); + + EXPECT_FALSE(bucket_exists_that_has_node(100, 1)); +} + + +TEST_F(TopLevelBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); + enable_distributor_cluster_state("distributor:1 storage:3"); + + for (int i = 1; i < 100; ++i) { + add_ideal_nodes(document::BucketId(16, i)); + } + + EXPECT_TRUE(bucket_exists_that_has_node(100, 1)); + + set_cluster_state("distributor:1 storage:3 .1.s:m"); + + 
EXPECT_FALSE(bucket_exists_that_has_node(100, 1)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, node_down_copies_get_in_sync) { + ASSERT_NO_FATAL_FAILURE(set_storage_nodes(3)); + document::BucketId bid(16, 1); + + add_nodes_to_stripe_bucket_db(bid, "0=3,1=2,2=3"); + + set_cluster_state("distributor:1 storage:3 .1.s:d"); + + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false), " + "node(idx=2,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false)", + dump_bucket(bid)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, initializing_while_recheck) { + lib::ClusterState state("distributor:1 storage:2 .0.s:i .0.i:0.1"); + set_cluster_state(state); + + ASSERT_EQ(message_count(2), _sender.commands().size()); + ASSERT_EQ(size_t(0), _sender_down.commands().size()); + + auto bucket = makeDocumentBucket(document::BucketId(16, 3)); + stripe_of_bucket(bucket.getBucketId()).bucket_db_updater().recheckBucketInfo(1, bucket); + + for (uint32_t i = 0; i < message_count(2); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), 100)); + } + + // Now we can pass on system state. 
+ ASSERT_EQ(size_t(1), _sender_down.commands().size()); + EXPECT_EQ(MessageType::SETSYSTEMSTATE, _sender_down.command(0)->getType()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, bit_change) { + std::vector<document::BucketId> bucketlist; + + { + set_cluster_state("bits:14 storage:1 distributor:2"); + + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + for (uint32_t bsi = 0; bsi < _bucket_spaces.size(); ++bsi) { + ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO); + const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.command(bsi)); + auto sreply = std::make_shared<RequestBucketInfoReply>(req); + sreply->setAddress(storage_address(0)); + auto& vec = sreply->getBucketInfo(); + if (req.getBucketSpace() == FixedBucketSpaces::default_space()) { + int cnt=0; + for (int i=0; cnt < 2; i++) { + auto distribution = _component->getDistribution(); + std::vector<uint16_t> distributors; + if (distribution->getIdealDistributorNode( + lib::ClusterState("bits:14 storage:1 distributor:2"), + document::BucketId(16, i)) + == 0) + { + vec.push_back(api::RequestBucketInfoReply::Entry( + document::BucketId(16, i), + api::BucketInfo(10,1,1))); + + bucketlist.push_back(document::BucketId(16, i)); + cnt++; + } + } + } + + bucket_db_updater().onRequestBucketInfoReply(sreply); + } + } + + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(bucketlist[0])); + EXPECT_EQ("BucketId(0x4000000000000002) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(bucketlist[1])); + + { + _sender.clear(); + set_cluster_state("bits:16 storage:1 distributor:2"); + + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + for (uint32_t bsi = 0; bsi < _bucket_spaces.size(); ++bsi) { + + ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO); + const auto &req = dynamic_cast<const 
RequestBucketInfoCommand &>(*_sender.command(bsi)); + auto sreply = std::make_shared<RequestBucketInfoReply>(req); + sreply->setAddress(storage_address(0)); + sreply->setResult(api::ReturnCode::OK); + if (req.getBucketSpace() == FixedBucketSpaces::default_space()) { + api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo(); + + for (uint32_t i = 0; i < 3; ++i) { + vec.push_back(api::RequestBucketInfoReply::Entry( + document::BucketId(16, i), + api::BucketInfo(10,1,1))); + } + + vec.push_back(api::RequestBucketInfoReply::Entry( + document::BucketId(16, 4), + api::BucketInfo(10,1,1))); + } + + bucket_db_updater().onRequestBucketInfoReply(sreply); + } + } + + EXPECT_EQ("BucketId(0x4000000000000000) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(document::BucketId(16, 0))); + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(document::BucketId(16, 1))); + EXPECT_EQ("BucketId(0x4000000000000002) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(document::BucketId(16, 2))); + EXPECT_EQ("BucketId(0x4000000000000004) : " + "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)", + dump_bucket(document::BucketId(16, 4))); + + _sender.clear(); + set_cluster_state("storage:1 distributor:2 .1.s:i"); + + _sender.clear(); + set_cluster_state("storage:1 distributor:2"); +}; + +TEST_F(TopLevelBucketDBUpdaterTest, recheck_node_with_failure) { + ASSERT_NO_FATAL_FAILURE(initialize_nodes_and_buckets(3, 5)); + + _sender.clear(); + + auto bucket = makeDocumentBucket(document::BucketId(16, 3)); + auto& stripe_bucket_db_updater = stripe_of_bucket(bucket.getBucketId()).bucket_db_updater(); + stripe_bucket_db_updater.recheckBucketInfo(1, bucket); + + ASSERT_EQ(size_t(1), _sender.commands().size()); + + uint16_t index = 0; + { + auto& rbi = 
dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0)); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + EXPECT_EQ(bucket.getBucketId(), rbi.getBuckets()[0]); + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + const api::StorageMessageAddress *address = _sender.command(0)->getAddress(); + index = address->getIndex(); + reply->setResult(api::ReturnCode::NOT_CONNECTED); + stripe_bucket_db_updater.onRequestBucketInfoReply(reply); + // Trigger that delayed message is sent + fake_clock().addSecondsToTime(10); + stripe_bucket_db_updater.resendDelayedMessages(); + } + + ASSERT_EQ(size_t(2), _sender.commands().size()); + + set_cluster_state(vespalib::make_string("distributor:1 storage:3 .%d.s:d", index)); + + // Recheck bucket. + { + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(1)); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + EXPECT_EQ(bucket.getBucketId(), rbi.getBuckets()[0]); + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + reply->setResult(api::ReturnCode::NOT_CONNECTED); + stripe_bucket_db_updater.onRequestBucketInfoReply(reply); + } + + // Should not retry since node is down. 
+ EXPECT_EQ(size_t(2), _sender.commands().size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, recheck_node) { + ASSERT_NO_FATAL_FAILURE(initialize_nodes_and_buckets(3, 5)); + + _sender.clear(); + + auto bucket = makeDocumentBucket(document::BucketId(16, 3)); + auto& stripe_bucket_db_updater = stripe_of_bucket(bucket.getBucketId()).bucket_db_updater(); + stripe_bucket_db_updater.recheckBucketInfo(1, bucket); + + ASSERT_EQ(size_t(1), _sender.commands().size()); + + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0)); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + EXPECT_EQ(bucket.getBucketId(), rbi.getBuckets()[0]); + + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + reply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry(document::BucketId(16, 3), + api::BucketInfo(20, 10, 12, 50, 60, true, true))); + stripe_bucket_db_updater.onRequestBucketInfoReply(reply); + + lib::ClusterState state("distributor:1 storage:3"); + for (uint32_t i = 0; i < 3; i++) { + EXPECT_EQ(get_ideal_str(document::BucketId(16, i), state), + get_nodes(document::BucketId(16, i))); + } + + for (uint32_t i = 4; i < 5; i++) { + EXPECT_EQ(get_ideal_str(document::BucketId(16, i), state), + get_nodes(document::BucketId(16, i))); + } + + BucketDatabase::Entry entry = get_bucket(bucket); + ASSERT_TRUE(entry.valid()); + + const BucketCopy* copy = entry->getNode(1); + ASSERT_TRUE(copy != nullptr); + EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, notify_bucket_change) { + enable_distributor_cluster_state("distributor:1 storage:1"); + + add_nodes_to_stripe_bucket_db(document::BucketId(16, 1), "0=1234"); + _sender.replies().clear(); + + { + api::BucketInfo info(1, 2, 3, 4, 5, true, true); + auto cmd = std::make_shared<api::NotifyBucketChangeCommand>( + makeDocumentBucket(document::BucketId(16, 1)), info); + cmd->setSourceIndex(0); + stripe_of_bucket(document::BucketId(16, 
1)).bucket_db_updater().onNotifyBucketChange(cmd); + } + + { + api::BucketInfo info(10, 11, 12, 13, 14, false, false); + auto cmd = std::make_shared<api::NotifyBucketChangeCommand>( + makeDocumentBucket(document::BucketId(16, 2)), info); + cmd->setSourceIndex(0); + stripe_of_bucket(document::BucketId(16, 2)).bucket_db_updater().onNotifyBucketChange(cmd); + } + + // Must receive reply + ASSERT_EQ(size_t(2), _sender.replies().size()); + + for (int i = 0; i < 2; ++i) { + ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY, _sender.reply(i)->getType()); + } + + // No database update until request bucket info replies have been received. + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=0,crc=0x4d2,docs=1234/1234,bytes=1234/1234," + "trusted=false,active=false,ready=false)", + dump_bucket(document::BucketId(16, 1))); + EXPECT_EQ(std::string("NONEXISTING"), dump_bucket(document::BucketId(16, 2))); + + ASSERT_EQ(size_t(2), _sender.commands().size()); + + std::vector<api::BucketInfo> infos; + infos.push_back(api::BucketInfo(4567, 200, 2000, 400, 4000, true, true)); + infos.push_back(api::BucketInfo(8999, 300, 3000, 500, 5000, false, false)); + + for (int i = 0; i < 2; ++i) { + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(i)); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + document::BucketId bucket_id(16, i + 1); + EXPECT_EQ(bucket_id, rbi.getBuckets()[0]); + + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + reply->getBucketInfo().push_back(api::RequestBucketInfoReply::Entry(bucket_id, infos[i])); + stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reply); + } + + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=0,crc=0x11d7,docs=200/400,bytes=2000/4000,trusted=true,active=true,ready=true)", + dump_bucket(document::BucketId(16, 1))); + EXPECT_EQ("BucketId(0x4000000000000002) : " + "node(idx=0,crc=0x2327,docs=300/500,bytes=3000/5000,trusted=true,active=false,ready=false)", + 
dump_bucket(document::BucketId(16, 2))); +} + +TEST_F(TopLevelBucketDBUpdaterTest, notify_bucket_change_from_node_down) { + enable_distributor_cluster_state("distributor:1 storage:2"); + + document::BucketId bucket_id(16, 1); + add_nodes_to_stripe_bucket_db(bucket_id, "1=1234"); + + _sender.replies().clear(); + + { + api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false); + auto cmd = std::make_shared<api::NotifyBucketChangeCommand>(makeDocumentBucket(bucket_id), info); + cmd->setSourceIndex(0); + stripe_of_bucket(bucket_id).bucket_db_updater().onNotifyBucketChange(cmd); + } + // Enable here to avoid having request bucket info be silently swallowed + // (send_request_bucket_info drops message if node is down). + enable_distributor_cluster_state("distributor:1 storage:2 .0.s:d"); + + ASSERT_EQ("BucketId(0x4000000000000001) : " + "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false,ready=false)", + dump_bucket(bucket_id)); + + ASSERT_EQ(size_t(1), _sender.replies().size()); + ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY, _sender.reply(0)->getType()); + + // Currently, this pending operation will be auto-flushed when the cluster state + // changes so the behavior is still correct. Keep this test around to prevent + // regressions here. 
+ ASSERT_EQ(size_t(1), _sender.commands().size()); + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0)); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + EXPECT_EQ(bucket_id, rbi.getBuckets()[0]); + + auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi); + reply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry( + bucket_id, + api::BucketInfo(8999, 300, 3000, 500, 5000, false, false))); + stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reply); + + // No change + EXPECT_EQ("BucketId(0x4000000000000001) : " + "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false,ready=false)", + dump_bucket(bucket_id)); +} + +/** + * Test that NotifyBucketChange received while there's a pending cluster state + * waits until the cluster state has been enabled as current before it sends off + * the single bucket info requests. This is to prevent a race condition where + * the replies to bucket info requests for buckets that would be owned by the + * distributor in the pending state but not by the current state would be + * discarded when attempted inserted into the bucket database. 
+ */ +TEST_F(TopLevelBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) { + set_cluster_state("distributor:1 storage:1"); + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + document::BucketId bucket_id(16, 1); + { + api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false); + auto cmd(std::make_shared<api::NotifyBucketChangeCommand>( + makeDocumentBucket(bucket_id), info)); + cmd->setSourceIndex(0); + stripe_of_bucket(bucket_id).bucket_db_updater().onNotifyBucketChange(cmd); + } + + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 storage:1"), + _bucket_spaces.size(), 10)); + + ASSERT_EQ(_bucket_spaces.size() + 1, _sender.commands().size()); + + { + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(_bucket_spaces.size())); + ASSERT_EQ(size_t(1), rbi.getBuckets().size()); + EXPECT_EQ(bucket_id, rbi.getBuckets()[0]); + } + _sender.clear(); + + // Queue must be cleared once pending state is enabled. 
+ { + lib::ClusterState state("distributor:1 storage:2"); + uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state, expected_msgs, dummy_buckets_to_return)); + } + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + { + auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0)); + EXPECT_EQ(size_t(0), rbi.getBuckets().size()); + } +} + +TEST_F(TopLevelBucketDBUpdaterTest, merge_reply) { + enable_distributor_cluster_state("distributor:1 storage:3"); + + document::BucketId bucket_id(16, 1234); + add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234"); + + std::vector<api::MergeBucketCommand::Node> nodes; + nodes.push_back(api::MergeBucketCommand::Node(0)); + nodes.push_back(api::MergeBucketCommand::Node(1)); + nodes.push_back(api::MergeBucketCommand::Node(2)); + + api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0); + auto reply = std::make_shared<api::MergeBucketReply>(cmd); + + _sender.clear(); + stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply); + + ASSERT_EQ(size_t(3), _sender.commands().size()); + + for (uint32_t i = 0; i < 3; i++) { + auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i)); + + ASSERT_TRUE(req.get() != nullptr); + ASSERT_EQ(size_t(1), req->getBuckets().size()); + EXPECT_EQ(bucket_id, req->getBuckets()[0]); + + auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req); + reqreply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry(bucket_id, + api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1)))); + + stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply); + } + + EXPECT_EQ("BucketId(0x40000000000004d2) : " + "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), " + "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false), " + 
"node(idx=2,crc=0x1e,docs=300/300,bytes=3000/3000,trusted=false,active=false,ready=false)", + dump_bucket(bucket_id)); +}; + +TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down) { + enable_distributor_cluster_state("distributor:1 storage:3"); + std::vector<api::MergeBucketCommand::Node> nodes; + + document::BucketId bucket_id(16, 1234); + add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234"); + + for (uint32_t i = 0; i < 3; ++i) { + nodes.push_back(api::MergeBucketCommand::Node(i)); + } + + api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0); + auto reply = std::make_shared<api::MergeBucketReply>(cmd); + + set_cluster_state(lib::ClusterState("distributor:1 storage:2")); + + _sender.clear(); + stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply); + + ASSERT_EQ(size_t(2), _sender.commands().size()); + + for (uint32_t i = 0; i < 2; i++) { + auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i)); + + ASSERT_TRUE(req.get() != nullptr); + ASSERT_EQ(size_t(1), req->getBuckets().size()); + EXPECT_EQ(bucket_id, req->getBuckets()[0]); + + auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req); + reqreply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry( + bucket_id, + api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1)))); + stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply); + } + + EXPECT_EQ("BucketId(0x40000000000004d2) : " + "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), " + "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)", + dump_bucket(bucket_id)); +}; + +TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { + enable_distributor_cluster_state("distributor:1 storage:3"); + std::vector<api::MergeBucketCommand::Node> nodes; + + document::BucketId bucket_id(16, 1234); + add_nodes_to_stripe_bucket_db(bucket_id, 
"0=1234,1=1234,2=1234"); + + for (uint32_t i = 0; i < 3; ++i) { + nodes.push_back(api::MergeBucketCommand::Node(i)); + } + + api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0); + auto reply = std::make_shared<api::MergeBucketReply>(cmd); + + _sender.clear(); + stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply); + + ASSERT_EQ(size_t(3), _sender.commands().size()); + + set_cluster_state(lib::ClusterState("distributor:1 storage:2")); + + for (uint32_t i = 0; i < 3; i++) { + auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i)); + + ASSERT_TRUE(req.get() != nullptr); + ASSERT_EQ(size_t(1), req->getBuckets().size()); + EXPECT_EQ(bucket_id, req->getBuckets()[0]); + + auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req); + reqreply->getBucketInfo().push_back( + api::RequestBucketInfoReply::Entry( + bucket_id, + api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1)))); + stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply); + } + + EXPECT_EQ("BucketId(0x40000000000004d2) : " + "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), " + "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)", + dump_bucket(bucket_id)); +}; + +TEST_F(TopLevelBucketDBUpdaterTest, flush) { + enable_distributor_cluster_state("distributor:1 storage:3"); + _sender.clear(); + + document::BucketId bucket_id(16, 1234); + add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234"); + + std::vector<api::MergeBucketCommand::Node> nodes; + for (uint32_t i = 0; i < 3; ++i) { + nodes.push_back(api::MergeBucketCommand::Node(i)); + } + + api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0); + auto reply = std::make_shared<api::MergeBucketReply>(cmd); + + _sender.clear(); + stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply); + + ASSERT_EQ(size_t(3), _sender.commands().size()); 
+ ASSERT_EQ(size_t(0), _sender_down.replies().size()); + + stripe_of_bucket(bucket_id).bucket_db_updater().flush(); + // Flushing should drop all merge bucket replies + EXPECT_EQ(size_t(0), _sender_down.commands().size()); +} + +std::string +TopLevelBucketDBUpdaterTest::get_sent_nodes(const std::string& old_cluster_state, + const std::string& new_cluster_state) +{ + auto fixture = create_pending_state_fixture_for_state_change(old_cluster_state, new_cluster_state); + sort_sent_messages_by_index(fixture->sender); + + std::ostringstream ost; + for (uint32_t i = 0; i < fixture->sender.commands().size(); i++) { + auto& req = dynamic_cast<RequestBucketInfoCommand&>(*fixture->sender.command(i)); + + if (i > 0) { + ost << ","; + } + + ost << req.getAddress()->getIndex(); + } + + return ost.str(); +} + +std::string +TopLevelBucketDBUpdaterTest::get_sent_nodes_distribution_changed(const std::string& old_cluster_state) +{ + DistributorMessageSenderStub sender; + + framework::defaultimplementation::FakeClock clock; + auto cluster_info = create_cluster_info(old_cluster_state); + std::unique_ptr<PendingClusterState> state( + PendingClusterState::createForDistributionChange( + clock, cluster_info, sender, top_level_bucket_space_repo(), api::Timestamp(1))); + + sort_sent_messages_by_index(sender); + + std::ostringstream ost; + for (uint32_t i = 0; i < sender.commands().size(); i++) { + auto& req = dynamic_cast<RequestBucketInfoCommand&>(*sender.command(i)); + + if (i > 0) { + ost << ","; + } + + ost << req.getAddress()->getIndex(); + } + + return ost.str(); +} + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_send_messages) { + EXPECT_EQ(get_node_list({0, 1, 2}), + get_sent_nodes("cluster:d", + "distributor:1 storage:3")); + + EXPECT_EQ(get_node_list({0, 1}), + get_sent_nodes("cluster:d", + "distributor:1 storage:3 .2.s:m")); + + EXPECT_EQ(get_node_list({2}), + get_sent_nodes("distributor:1 storage:2", + "distributor:1 storage:3")); + + EXPECT_EQ(get_node_list({2, 3, 4, 
5}), + get_sent_nodes("distributor:1 storage:2", + "distributor:1 storage:6")); + + EXPECT_EQ(get_node_list({0, 1, 2}), + get_sent_nodes("distributor:4 storage:3", + "distributor:3 storage:3")); + + EXPECT_EQ(get_node_list({0, 1, 2, 3}), + get_sent_nodes("distributor:4 storage:3", + "distributor:4 .2.s:d storage:4")); + + EXPECT_EQ("", + get_sent_nodes("distributor:4 storage:3", + "distributor:4 .0.s:d storage:4")); + + EXPECT_EQ("", + get_sent_nodes("distributor:3 storage:3", + "distributor:4 storage:3")); + + EXPECT_EQ(get_node_list({2}), + get_sent_nodes("distributor:3 storage:3 .2.s:i", + "distributor:3 storage:3")); + + EXPECT_EQ(get_node_list({1}), + get_sent_nodes("distributor:3 storage:3 .1.s:d", + "distributor:3 storage:3")); + + EXPECT_EQ(get_node_list({1, 2, 4}), + get_sent_nodes("distributor:3 storage:4 .1.s:d .2.s:i", + "distributor:3 storage:5")); + + EXPECT_EQ("", + get_sent_nodes("distributor:1 storage:3", + "cluster:d")); + + EXPECT_EQ("", + get_sent_nodes("distributor:1 storage:3", + "distributor:1 storage:3")); + + EXPECT_EQ("", + get_sent_nodes("distributor:1 storage:3", + "cluster:d distributor:1 storage:6")); + + EXPECT_EQ("", + get_sent_nodes("distributor:3 storage:3", + "distributor:3 .2.s:m storage:3")); + + EXPECT_EQ(get_node_list({0, 1, 2}), + get_sent_nodes("distributor:3 .2.s:m storage:3", + "distributor:3 .2.s:d storage:3")); + + EXPECT_EQ("", + get_sent_nodes("distributor:3 .2.s:m storage:3", + "distributor:3 storage:3")); + + EXPECT_EQ(get_node_list({0, 1, 2}), + get_sent_nodes_distribution_changed("distributor:3 storage:3")); + + EXPECT_EQ(get_node_list({0, 1}), + get_sent_nodes("distributor:10 storage:2", + "distributor:10 .1.s:d storage:2")); + + EXPECT_EQ("", + get_sent_nodes("distributor:2 storage:2", + "distributor:3 .2.s:i storage:2")); + + EXPECT_EQ(get_node_list({0, 1, 2}), + get_sent_nodes("distributor:3 storage:3", + "distributor:3 .2.s:s storage:3")); + + EXPECT_EQ("", + get_sent_nodes("distributor:3 .2.s:s storage:3", + 
"distributor:3 .2.s:d storage:3")); + + EXPECT_EQ(get_node_list({1}), + get_sent_nodes("distributor:3 storage:3 .1.s:m", + "distributor:3 storage:3")); + + EXPECT_EQ("", + get_sent_nodes("distributor:3 storage:3", + "distributor:3 storage:3 .1.s:m")); +}; + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_receive) { + DistributorMessageSenderStub sender; + + auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState("distributor:1 storage:3")); + + framework::defaultimplementation::FakeClock clock; + auto cluster_info = create_cluster_info("cluster:d"); + OutdatedNodesMap outdated_nodes_map; + std::unique_ptr<PendingClusterState> state( + PendingClusterState::createForClusterStateChange( + clock, cluster_info, sender, top_level_bucket_space_repo(), + cmd, outdated_nodes_map, api::Timestamp(1))); + + ASSERT_EQ(message_count(3), sender.commands().size()); + + sort_sent_messages_by_index(sender); + + std::ostringstream ost; + for (uint32_t i = 0; i < sender.commands().size(); i++) { + auto* req = dynamic_cast<RequestBucketInfoCommand*>(sender.command(i).get()); + ASSERT_TRUE(req != nullptr); + + auto rep = std::make_shared<RequestBucketInfoReply>(*req); + + rep->getBucketInfo().push_back( + RequestBucketInfoReply::Entry( + document::BucketId(16, i), + api::BucketInfo(i, i, i, i, i))); + + ASSERT_TRUE(state->onRequestBucketInfoReply(rep)); + ASSERT_EQ((i == (sender.commands().size() - 1)), state->done()); + } + + auto& pending_transition = state->getPendingBucketSpaceDbTransition(makeBucketSpace()); + EXPECT_EQ(3u, pending_transition.results().size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_with_group_down) { + std::string config = dist_config_6_nodes_across_4_groups(); + config += "distributor_auto_ownership_transfer_on_whole_group_down true\n"; + set_distribution(config); + + // Group config has nodes {0, 1}, {2, 3}, {4, 5} + // We're node index 0. + + // Entire group 1 goes down. Must refetch from all nodes. 
+ EXPECT_EQ(get_node_list({0, 1, 2, 3, 4, 5}), + get_sent_nodes("distributor:6 storage:6", + "distributor:6 .2.s:d .3.s:d storage:6")); + + // But don't fetch if not the entire group is down. + EXPECT_EQ("", + get_sent_nodes("distributor:6 storage:6", + "distributor:6 .2.s:d storage:6")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) { + std::string config = dist_config_6_nodes_across_4_groups(); + config += "distributor_auto_ownership_transfer_on_whole_group_down false\n"; + set_distribution(config); + + // Group is down, but config says to not do anything about it. + EXPECT_EQ(get_node_list({0, 1, 2, 3, 4, 5}, _bucket_spaces.size() - 1), + get_sent_nodes("distributor:6 storage:6", + "distributor:6 .2.s:d .3.s:d storage:6")); +} + + +namespace { + +void +parse_input_data(const std::string& data, + uint64_t timestamp, + PendingClusterState& state, + bool include_bucket_info) +{ + vespalib::StringTokenizer tokenizer(data, "|"); + for (uint32_t i = 0; i < tokenizer.size(); i++) { + vespalib::StringTokenizer tok2(tokenizer[i], ":"); + + uint16_t node = atoi(tok2[0].data()); + + state.setNodeReplied(node); + auto& pending_transition = state.getPendingBucketSpaceDbTransition(makeBucketSpace()); + + vespalib::StringTokenizer tok3(tok2[1], ","); + for (uint32_t j = 0; j < tok3.size(); j++) { + if (include_bucket_info) { + vespalib::StringTokenizer tok4(tok3[j], "/"); + + pending_transition.addNodeInfo( + document::BucketId(16, atoi(tok4[0].data())), + BucketCopy( + timestamp, + node, + api::BucketInfo( + atoi(tok4[1].data()), + atoi(tok4[2].data()), + atoi(tok4[3].data()), + atoi(tok4[2].data()), + atoi(tok4[3].data())))); + } else { + pending_transition.addNodeInfo( + document::BucketId(16, atoi(tok3[j].data())), + BucketCopy(timestamp, + node, + api::BucketInfo(3, 3, 3, 3, 3))); + } + } + } +} + +struct BucketDumper : public BucketDatabase::EntryProcessor +{ + std::ostringstream ost; + bool _include_bucket_info; + + 
explicit BucketDumper(bool include_bucket_info) + : _include_bucket_info(include_bucket_info) + { + } + + bool process(const BucketDatabase::ConstEntryRef& e) override { + document::BucketId bucket_id(e.getBucketId()); + + ost << uint32_t(bucket_id.getRawId()) << ":"; + for (uint32_t i = 0; i < e->getNodeCount(); ++i) { + if (i > 0) { + ost << ","; + } + const BucketCopy& copy(e->getNodeRef(i)); + ost << copy.getNode(); + if (_include_bucket_info) { + ost << "/" << copy.getChecksum() + << "/" << copy.getDocumentCount() + << "/" << copy.getTotalDocumentSize() + << "/" << (copy.trusted() ? "t" : "u"); + } + } + ost << "|"; + return true; + } +}; + +} + +std::string +TopLevelBucketDBUpdaterTest::merge_bucket_lists( + const lib::ClusterState& old_state, + const std::string& existing_data, + const lib::ClusterState& new_state, + const std::string& new_data, + bool include_bucket_info) +{ + framework::defaultimplementation::FakeClock clock; + framework::MilliSecTimer timer(clock); + + DistributorMessageSenderStub sender; + OutdatedNodesMap outdated_nodes_map; + + { + auto cmd = std::make_shared<api::SetSystemStateCommand>(old_state); + api::Timestamp before_time(1); + auto cluster_info = create_cluster_info("cluster:d"); + + auto state = PendingClusterState::createForClusterStateChange( + clock, cluster_info, sender, top_level_bucket_space_repo(), + cmd, outdated_nodes_map, before_time); + + parse_input_data(existing_data, before_time, *state, include_bucket_info); + auto guard = acquire_stripe_guard(); + state->merge_into_bucket_databases(*guard); + } + + BucketDumper dumper_tmp(true); + for (auto* s : distributor_stripes()) { + auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase(); + db.forEach(dumper_tmp); + } + + { + auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(new_state)); + api::Timestamp after_time(2); + auto cluster_info = create_cluster_info(old_state.toString()); + + auto state = 
PendingClusterState::createForClusterStateChange(
+ clock, cluster_info, sender, top_level_bucket_space_repo(),
+ cmd, outdated_nodes_map, after_time);
+
+ parse_input_data(new_data, after_time, *state, include_bucket_info);
+ auto guard = acquire_stripe_guard();
+ state->merge_into_bucket_databases(*guard);
+ }
+
+ BucketDumper dumper(include_bucket_info);
+ for (auto* s : distributor_stripes()) {
+ auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase();
+ db.forEach(dumper);
+ db.clear();
+ }
+ return dumper.ost.str();
+}
+
+std::string
+TopLevelBucketDBUpdaterTest::merge_bucket_lists(const std::string& existing_data,
+ const std::string& new_data,
+ bool include_bucket_info)
+{
+ return merge_bucket_lists(
+ lib::ClusterState("distributor:1 storage:3"),
+ existing_data,
+ lib::ClusterState("distributor:1 storage:3"),
+ new_data,
+ include_bucket_info);
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_merge) {
+ // Result is of the form: [bucket w/o count bits]:[node indexes]|..
+ // Input is of the form: [node]:[bucket w/o count bits]|...
+
+ // Simple initializing case - ask all nodes for info
+ EXPECT_EQ("4:0,1|2:0,1|6:1,2|1:0,2|5:2,0|3:2,1|",
+ merge_bucket_lists(
+ "",
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6"));
+
+ // New node came up
+ EXPECT_EQ("4:0,1|2:0,1|6:1,2,3|1:0,2,3|5:2,0,3|3:2,1,3|",
+ merge_bucket_lists(
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+ "3:1,3,5,6"));
+
+ // Node came up with some buckets removed and some added
+ // Buckets that were removed should not be removed as the node
+ // didn't lose a disk.
+ EXPECT_EQ("8:0|4:0,1|2:0,1|6:1,0,2|1:0,2|5:2,0|3:2,1|",
+ merge_bucket_lists(
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+ "0:1,2,6,8"));
+
+ // Bucket info format is "bucketid/checksum/count/size"
+ // Node went from initializing to up and invalid bucket went to empty. 
+ EXPECT_EQ("2:0/0/0/0/t|", + merge_bucket_lists( + "0:2/0/0/1", + "0:2/0/0/0", + true)); + + EXPECT_EQ("5:1/2/3/4/u,0/0/0/0/u|", + merge_bucket_lists("", "0:5/0/0/0|1:5/2/3/4", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { + // Node went from initializing to up and non-invalid bucket changed. + EXPECT_EQ("2:0/2/3/4/t|3:0/2/4/6/t|", + merge_bucket_lists( + lib::ClusterState("distributor:1 storage:1 .0.s:i"), + "0:2/1/2/3,3/2/4/6", + lib::ClusterState("distributor:1 storage:1"), + "0:2/2/3/4,3/2/4/6", + true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) { + document::BucketId bucket(16, 3); + lib::ClusterState state_before("distributor:1 storage:1"); + { + uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + } + _sender.clear(); + + stripe_of_bucket(bucket).bucket_db_updater().recheckBucketInfo(0, makeDocumentBucket(bucket)); + + ASSERT_EQ(size_t(1), _sender.commands().size()); + auto rbi = std::dynamic_pointer_cast<RequestBucketInfoCommand>(_sender.command(0)); + + lib::ClusterState state_after("distributor:3 storage:3"); + + { + uint32_t expected_msgs = message_count(2), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_after, expected_msgs, dummy_buckets_to_return)); + } + EXPECT_FALSE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_current_state()); + + ASSERT_NO_FATAL_FAILURE(send_fake_reply_for_single_bucket_request(*rbi)); + + EXPECT_EQ("NONEXISTING", dump_bucket(bucket)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { + document::BucketId bucket(16, 3); + lib::ClusterState state_before("distributor:1 storage:1"); + { + uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1; + 
ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + } + _sender.clear(); + + stripe_of_bucket(bucket).bucket_db_updater().recheckBucketInfo(0, makeDocumentBucket(bucket)); + + ASSERT_EQ(size_t(1), _sender.commands().size()); + auto rbi = std::dynamic_pointer_cast<RequestBucketInfoCommand>(_sender.command(0)); + + lib::ClusterState state_after("distributor:3 storage:3"); + // Set, but _don't_ enable cluster state. We want it to be pending. + set_cluster_state(state_after); + EXPECT_TRUE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_current_state()); + EXPECT_FALSE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_pending_state()); + + ASSERT_NO_FATAL_FAILURE(send_fake_reply_for_single_bucket_request(*rbi)); + + EXPECT_EQ("NONEXISTING", dump_bucket(bucket)); +} + +/* + * If we get a distribution config change, it's important that cluster states that + * arrive after this--but _before_ the pending cluster state has finished--must trigger + * a full bucket info fetch no matter what the cluster state change was! Otherwise, we + * will with a high likelihood end up not getting the complete view of the buckets in + * the cluster. + */ +TEST_F(TopLevelBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) { + lib::ClusterState state_before("distributor:6 storage:6"); + { + uint32_t expected_msgs = message_count(6), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + } + _sender.clear(); + std::string distConfig(dist_config_6_nodes_across_2_groups()); + set_distribution(distConfig); + + sort_sent_messages_by_index(_sender); + ASSERT_EQ(message_count(6), _sender.commands().size()); + // Suddenly, a wild cluster state change appears! 
Even though this state + // does not in itself imply any bucket changes, it will still overwrite the + // pending cluster state and thus its state of pending bucket info requests. + set_cluster_state("distributor:6 .2.t:12345 storage:6"); + + ASSERT_EQ(message_count(12), _sender.commands().size()); + + // Send replies for first messageCount(6) (outdated requests). + int num_buckets = 10; + for (uint32_t i = 0; i < message_count(6); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 storage:6"), + *_sender.command(i), num_buckets)); + } + // No change from these. + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(1, "distributor:6 storage:6")); + + // Send for current pending. + for (uint32_t i = 0; i < message_count(6); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 .2.t:12345 storage:6"), + *_sender.command(i + message_count(6)), + num_buckets)); + } + ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(num_buckets, "distributor:6 storage:6")); + _sender.clear(); + + // No more pending global fetch; this should be a no-op state. + set_cluster_state("distributor:6 .3.t:12345 storage:6"); + EXPECT_EQ(size_t(0), _sender.commands().size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) { + uint32_t num_buckets = 20; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"), + message_count(6), num_buckets)); + _sender.clear(); + EXPECT_TRUE(all_distributor_stripes_are_in_recovery_mode()); + complete_recovery_mode_on_all_stripes(); + EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode()); + + set_distribution(dist_config_6_nodes_across_4_groups()); + sort_sent_messages_by_index(_sender); + // No replies received yet, still no recovery mode. 
+ EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode()); + + ASSERT_EQ(message_count(6), _sender.commands().size()); + num_buckets = 10; + for (uint32_t i = 0; i < message_count(6); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 storage:6"), + *_sender.command(i), num_buckets)); + } + + // Pending cluster state (i.e. distribution) has been enabled, which should + // cause recovery mode to be entered. + EXPECT_TRUE(all_distributor_stripes_are_in_recovery_mode()); + complete_recovery_mode_on_all_stripes(); + EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) { + set_distribution(dist_config_3_nodes_in_1_group()); + + constexpr uint32_t n_buckets = 100; + ASSERT_NO_FATAL_FAILURE( + set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"), message_count(6), n_buckets)); + _sender.clear(); + + // Config implies a different node set than the current cluster state, so it's crucial that + // DB pruning is _not_ elided. Yes, this is inherently racing with cluster state changes and + // should be changed to be atomic and controlled by the cluster controller instead of config. + // But this is where we currently are. 
+ set_distribution(dist_config_6_nodes_across_2_groups()); + for (auto* s : distributor_stripes()) { + const auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase(); + db.acquire_read_guard()->for_each([&]([[maybe_unused]] uint64_t key, const auto& e) { + auto id = e.getBucketId(); + EXPECT_TRUE(distributor_bucket_space(id).get_bucket_ownership_flags(id).owned_in_pending_state()); + }); + } +} + +TEST_F(TopLevelBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) { + fake_clock().setAbsoluteTimeInSeconds(101234); + lib::ClusterState state_before("distributor:1 storage:1"); + { + uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + } + // setAndEnableClusterState adds n buckets with id (16, i) + document::BucketId bucket(16, 0); + BucketDatabase::Entry e = get_bucket(bucket); + ASSERT_TRUE(e.valid()); + EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) { + { + lib::ClusterState state_before("distributor:1 storage:1 .0.s:i"); + uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 0; + // This step is required to make the distributor ready for accepting + // the below explicit database insertion towards node 0. 
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + } + _sender.clear(); + fake_clock().setAbsoluteTimeInSeconds(1000); + lib::ClusterState state("distributor:1 storage:1"); + set_cluster_state(state); + ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size()); + + // Before replying with the bucket info, simulate the arrival of a mutation + // reply that alters the state of the bucket with information that will be + // more recent that what is returned by the bucket info. This information + // must not be lost when the bucket info is later merged into the database. + document::BucketId bucket(16, 1); + constexpr uint64_t insertion_timestamp = 1001ULL * 1000000; + api::BucketInfo wanted_info(5, 6, 7); + stripe_of_bucket(bucket).bucket_db_updater().operation_context().update_bucket_database( + makeDocumentBucket(bucket), + BucketCopy(insertion_timestamp, 0, wanted_info), + DatabaseUpdate::CREATE_IF_NONEXISTING); + + fake_clock().setAbsoluteTimeInSeconds(1002); + constexpr uint32_t buckets_returned = 10; // Buckets (16, 0) ... (16, 9) + // Return bucket information which on the timeline might originate from + // anywhere between [1000, 1002]. Our assumption is that any mutations + // taking place after t=1000 must have its reply received and processed + // by this distributor and timestamped strictly higher than t=1000 (modulo + // clock skew, of course, but that is outside the scope of this). A mutation + // happening before t=1000 but receiving a reply at t>1000 does not affect + // correctness, as this should contain the same bucket info as that + // contained in the full bucket reply and the DB update is thus idempotent. 
+ for (uint32_t i = 0; i < _bucket_spaces.size(); ++i) { + ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), buckets_returned)); + } + + BucketDatabase::Entry e = get_bucket(bucket); + ASSERT_EQ(uint32_t(1), e->getNodeCount()); + EXPECT_EQ(wanted_info, e->getNodeRef(0).getBucketInfo()); +} + +std::vector<uint16_t> +TopLevelBucketDBUpdaterTest::get_send_set() const +{ + std::vector<uint16_t> nodes; + std::transform(_sender.commands().begin(), + _sender.commands().end(), + std::back_inserter(nodes), + [](auto& cmd) + { + auto& req(dynamic_cast<const api::RequestBucketInfoCommand&>(*cmd)); + return req.getAddress()->getIndex(); + }); + return nodes; +} + +std::vector<uint16_t> +TopLevelBucketDBUpdaterTest::get_sent_nodes_with_preemption( + const std::string& old_cluster_state, + uint32_t expected_old_state_messages, + const std::string& preempted_cluster_state, + const std::string& new_cluster_state) +{ + uint32_t dummy_buckets_to_return = 10; + // FIXME cannot chain assertion checks in non-void function + set_and_enable_cluster_state(lib::ClusterState(old_cluster_state), + expected_old_state_messages, + dummy_buckets_to_return); + + _sender.clear(); + + set_cluster_state(preempted_cluster_state); + _sender.clear(); + // Do not allow the pending state to become the active state; trigger a + // new transition without ACKing the info requests first. This will + // overwrite the pending state entirely. 
+ set_cluster_state(lib::ClusterState(new_cluster_state)); + return get_send_set(); +} + +std::vector<uint16_t> +TopLevelBucketDBUpdaterTest::expand_node_vec(const std::vector<uint16_t>& nodes) +{ + std::vector<uint16_t> res; + size_t count = _bucket_spaces.size(); + for (const auto &node : nodes) { + for (uint32_t i = 0; i < count; ++i) { + res.push_back(node); + } + } + return res; +} + +/* + * If we don't carry over the set of nodes that we need to fetch from, + * a naive comparison between the active state and the new state will + * make it appear to the distributor that nothing has changed, as any + * database modifications caused by intermediate states will not be + * accounted for (basically the ABA problem in a distributed setting). + */ +TEST_F(TopLevelBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) { + EXPECT_EQ(expand_node_vec({0, 1, 2, 3, 4, 5}), + get_sent_nodes_with_preemption("version:1 distributor:6 storage:6", + message_count(6), + "version:2 distributor:6 .5.s:d storage:6", + "version:3 distributor:6 storage:6")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) { + EXPECT_EQ(expand_node_vec({2, 3}), + get_sent_nodes_with_preemption( + "version:1 distributor:6 storage:6 .2.s:d", + message_count(5), + "version:2 distributor:6 storage:6 .2.s:d .3.s:d", + "version:3 distributor:6 storage:6")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) { + EXPECT_EQ(expand_node_vec({2}), + get_sent_nodes_with_preemption( + "version:1 distributor:6 storage:6", + message_count(6), + "version:2 distributor:6 storage:6 .2.s:d", + "version:3 distributor:6 storage:6")); +} + +using NodeVec = std::vector<uint16_t>; + +TEST_F(TopLevelBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) { + EXPECT_EQ(NodeVec{}, + get_sent_nodes_with_preemption( + "version:1 distributor:6 storage:6 .2.s:d", + message_count(5), 
+ "version:2 distributor:6 storage:6", // Sends to 2. + "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again. +} + +TEST_F(TopLevelBucketDBUpdaterTest, do_not_send_to_preempted_node_not_part_of_new_state) { + // Even though 100 nodes are preempted, not all of these should be part + // of the request afterwards when only 6 are part of the state. + EXPECT_EQ(expand_node_vec({0, 1, 2, 3, 4, 5}), + get_sent_nodes_with_preemption( + "version:1 distributor:6 storage:100", + message_count(100), + "version:2 distributor:5 .4.s:d storage:100", + "version:3 distributor:6 storage:6")); +} + +TEST_F(TopLevelBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) { + lib::ClusterState state_before("version:1 distributor:6 storage:6 .1.t:1234"); + uint32_t expected_msgs = message_count(6), dummy_buckets_to_return = 10; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return)); + _sender.clear(); + // New cluster state that should not by itself trigger any new fetches, + // unless outdated node set is somehow not cleared after an enabled + // (completed) cluster state has been set. + set_cluster_state("version:3 distributor:6 storage:6"); + EXPECT_EQ(size_t(0), _sender.commands().size()); +} + +// XXX test currently disabled since distribution config currently isn't used +// at all in order to deduce the set of nodes to send to. This might not matter +// in practice since it is assumed that the cluster state matching the new +// distribution config will follow very shortly after the config has been +// applied to the node. The new cluster state will then send out requests to +// the correct node set. 
+TEST_F(TopLevelBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) { + uint32_t expected_msgs = 6, dummy_buckets_to_return = 20; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"), + expected_msgs, dummy_buckets_to_return)); + _sender.clear(); + + // Intentionally trigger a racing config change which arrives before the + // new cluster state representing it. + set_distribution(dist_config_3_nodes_in_1_group()); + sort_sent_messages_by_index(_sender); + + EXPECT_EQ((NodeVec{0, 1, 2}), get_send_set()); +} + +/** + * Test scenario where a cluster is downsized by removing a subset of the nodes + * from the distribution configuration. The system must be able to deal with + * a scenario where the set of nodes between two cluster states across a config + * change may differ. + * + * See VESPA-790 for details. + */ +TEST_F(TopLevelBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) { + uint32_t expected_msgs = message_count(3), dummy_buckets_to_return = 1; + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:3 storage:3"), + expected_msgs, dummy_buckets_to_return)); + _sender.clear(); + + // Cluster goes from {0, 1, 2} -> {0, 1}. This leaves us with a config + // that does not contain node 2 while the _active_ cluster state still + // contains this node. + const char* downsize_cfg = + "redundancy 2\n" + "distributor_auto_ownership_transfer_on_whole_group_down true\n" + "group[2]\n" + "group[0].name \"invalid\"\n" + "group[0].index \"invalid\"\n" + "group[0].partitions 1|*\n" + "group[0].nodes[0]\n" + "group[1].name rack0\n" + "group[1].index 0\n" + "group[1].nodes[2]\n" + "group[1].nodes[0].index 0\n" + "group[1].nodes[1].index 1\n"; + + set_distribution(downsize_cfg); + sort_sent_messages_by_index(_sender); + _sender.clear(); + + // Attempt to apply state with {0, 1} set. 
This will compare the new state + // with the previous state, which still has node 2. + expected_msgs = message_count(2); + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:2 storage:2"), + expected_msgs, dummy_buckets_to_return)); + + EXPECT_EQ(expand_node_vec({0, 1}), get_send_set()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) { + auto fixture = create_pending_state_fixture_for_state_change( + "distributor:2 storage:2", "distributor:1 storage:2"); + EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); + + fixture = create_pending_state_fixture_for_state_change( + "distributor:2 storage:2", "distributor:2 .1.s:d storage:2"); + EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) { + auto fixture = create_pending_state_fixture_for_state_change( + "distributor:2 storage:2", "distributor:2 storage:1"); + EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer()); + + fixture = create_pending_state_fixture_for_state_change( + "distributor:2 storage:2", "distributor:2 storage:2 .1.s:d"); + EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) { + auto fixture = create_pending_state_fixture_for_distribution_change("distributor:2 storage:2"); + EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) { + ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:2", 5, message_count(2))); + + EXPECT_EQ(uint64_t(5000), last_transition_time_in_millis()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) { + ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:2", 5, 
message_count(2))); + ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:3", 3, message_count(1))); + + EXPECT_EQ(uint64_t(3000), last_transition_time_in_millis()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) { + lib::ClusterState state("distributor:2 storage:2"); + ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state, message_count(2), 1)); + + _sender.clear(); + set_distribution(dist_config_3_nodes_in_1_group()); + fake_clock().addSecondsToTime(4); + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, message_count(2))); + EXPECT_EQ(uint64_t(4000), last_transition_time_in_millis()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) { + _sender.clear(); + set_cluster_state("version:1 distributor:2 storage:2"); + fake_clock().addSecondsToTime(5); + // Pre-empted with new state here, which will push out the old pending + // state and replace it with a new one. We should still count the time + // used processing the old state. + ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("version:2 distributor:2 storage:3", 3, message_count(3))); + + EXPECT_EQ(uint64_t(8000), last_transition_time_in_millis()); +} + +/* + * Brief reminder on test DSL for checking bucket merge operations: + * + * merge_bucket_lists() takes as input strings of the format + * <node>:<raw bucket id>/<checksum>/<num docs>/<doc size>|<node>: + * and returns a string describing the bucket DB post-merge with the format + * <raw bucket id>:<node>/<checksum>/<num docs>/<doc size>,<node>:....|<raw bucket id>:.... + * + * Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake. 
+ */ + +TEST_F(TopLevelBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) { + // Replacing bucket information for content node 0 should not mark existing + // untrusted replica as trusted as a side effect. + EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|", + merge_bucket_lists( + lib::ClusterState("distributor:1 storage:3 .0.s:i"), + "0:5/0/0/0|1:5/7/8/9", + lib::ClusterState("distributor:1 storage:3 .0.s:u"), + "0:5/1/2/3|1:5/7/8/9", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) { + EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|", + merge_bucket_lists("", "0:5/1/2/3|1:5/7/8/9", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) { + EXPECT_EQ("5:0/1/2/3/t|", + merge_bucket_lists("", "0:5/1/2/3", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) { + EXPECT_EQ("5:0/1/2/3/t|", + merge_bucket_lists("0:5/1/2/3", "0:5/1/2/3", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) { + EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|", + merge_bucket_lists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) { + EXPECT_EQ("5:1/2/3/4/u,0/1/2/3/t|", + merge_bucket_lists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) { + // This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted + // in that _all_ content nodes are considered outdated when distributor changes take place, + // and therefore a slightly different code path is taken. 
In particular, bucket info for + // outdated nodes gets removed before possibly being re-added (if present in the bucket info + // response). + EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|", + merge_bucket_lists( + lib::ClusterState("distributor:2 storage:3"), + "0:5/1/2/3|1:5/7/8/9", + lib::ClusterState("distributor:1 storage:3"), + "0:5/1/2/3|1:5/7/8/9", true)); +} + +// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475 +TEST_F(TopLevelBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) { + set_distribution(dist_config_6_nodes_across_2_groups()); + + const vespalib::string current_hash = "(0d*|*(0;0;1;2)(1;3;4;5))"; + const vespalib::string legacy_hash = "(0d3|3|*(0;0;1;2)(1;3;4;5))"; + + set_cluster_state("distributor:6 storage:6"); + ASSERT_EQ(message_count(6), _sender.commands().size()); + + api::RequestBucketInfoCommand* global_req = nullptr; + for (auto& cmd : _sender.commands()) { + auto& req_cmd = dynamic_cast<api::RequestBucketInfoCommand&>(*cmd); + if (req_cmd.getBucketSpace() == document::FixedBucketSpaces::global_space()) { + global_req = &req_cmd; + break; + } + } + ASSERT_TRUE(global_req != nullptr); + ASSERT_EQ(current_hash, global_req->getDistributionHash()); + + auto reply = std::make_shared<api::RequestBucketInfoReply>(*global_req); + reply->setResult(api::ReturnCode::REJECTED); + bucket_db_updater().onRequestBucketInfoReply(reply); + + fake_clock().addSecondsToTime(10); + bucket_db_updater().resend_delayed_messages(); + + // Should now be a resent request with legacy distribution hash + ASSERT_EQ(message_count(6) + 1, _sender.commands().size()); + auto& legacy_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back()); + ASSERT_EQ(legacy_hash, legacy_req.getDistributionHash()); + + // Now if we reject it _again_ we should cycle back to the current hash + // in case it wasn't a hash-based rejection after all. And the circle of life continues. 
+ reply = std::make_shared<api::RequestBucketInfoReply>(legacy_req); + reply->setResult(api::ReturnCode::REJECTED); + bucket_db_updater().onRequestBucketInfoReply(reply); + + fake_clock().addSecondsToTime(10); + bucket_db_updater().resend_delayed_messages(); + + ASSERT_EQ(message_count(6) + 2, _sender.commands().size()); + auto& new_current_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back()); + ASSERT_EQ(current_hash, new_current_req.getDistributionHash()); +} + +namespace { + +template <typename Func> +void for_each_bucket(const BucketDatabase& db, const document::BucketSpace& space, Func&& f) { + BucketId last(0); + auto e = db.getNext(last); + while (e.valid()) { + f(space, e); + e = db.getNext(e.getBucketId()); + } +} + +template <typename Func> +void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) { + for (const auto& space : repo) { + for_each_bucket(space.second->getBucketDatabase(), space.first, f); + } +} + +} + +TEST_F(TopLevelBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) { + set_stale_reads_enabled(true); + + lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition + set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity + + ASSERT_EQ(message_count(4), _sender.commands().size()); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(initial_state, message_count(4), n_buckets)); + _sender.clear(); + + EXPECT_EQ(n_buckets, mutable_default_dbs_size()); + EXPECT_EQ(n_buckets, mutable_global_dbs_size()); + EXPECT_EQ(0u, read_only_default_dbs_size()); + EXPECT_EQ(0u, read_only_global_dbs_size()); + + lib::ClusterState pending_state("distributor:2 storage:4"); + + std::unordered_set<Bucket, Bucket::hash> buckets_not_owned_in_pending_state; + for (auto* s : distributor_stripes()) { + for_each_bucket(mutable_repo(*s), [&](const auto& space, 
const auto& entry) { + if (!distributor_bucket_space(entry.getBucketId()).owns_bucket_in_state(pending_state, entry.getBucketId())) { + buckets_not_owned_in_pending_state.insert(Bucket(space, entry.getBucketId())); + } + }); + } + EXPECT_FALSE(buckets_not_owned_in_pending_state.empty()); + + set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true)); // Now requires activation + + const auto buckets_not_owned_per_space = (buckets_not_owned_in_pending_state.size() / 2); // 2 spaces + const auto expected_mutable_buckets = n_buckets - buckets_not_owned_per_space; + EXPECT_EQ(expected_mutable_buckets, mutable_default_dbs_size()); + EXPECT_EQ(expected_mutable_buckets, mutable_global_dbs_size()); + EXPECT_EQ(buckets_not_owned_per_space, read_only_default_dbs_size()); + EXPECT_EQ(buckets_not_owned_per_space, read_only_global_dbs_size()); + + for (auto* s : distributor_stripes()) { + for_each_bucket(read_only_repo(*s), [&](const auto& space, const auto& entry) { + EXPECT_TRUE(buckets_not_owned_in_pending_state.find(Bucket(space, entry.getBucketId())) + != buckets_not_owned_in_pending_state.end()); + }); + } +} + +TEST_F(TopLevelBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) { + constexpr uint32_t n_buckets = 10; + // No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will + // cause some buckets to be entirely unavailable. 
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, + "version:2 distributor:1 storage:4 .0.s:d .1.s:m", n_buckets, 0); + + EXPECT_EQ(0u, read_only_default_dbs_size()); + EXPECT_EQ(0u, read_only_global_dbs_size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) { + set_stale_reads_enabled(false); + + lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition + set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity + + ASSERT_EQ(message_count(4), _sender.commands().size()); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(initial_state, message_count(4), n_buckets)); + _sender.clear(); + + // Nothing in read-only DB after first bulk load of buckets. + EXPECT_EQ(0u, read_only_default_dbs_size()); + EXPECT_EQ(0u, read_only_global_dbs_size()); + + set_cluster_state("distributor:2 storage:4"); + // No buckets should be moved into read only db after ownership changes. 
+ EXPECT_EQ(0u, read_only_default_dbs_size()); + EXPECT_EQ(0u, read_only_global_dbs_size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) { + set_stale_reads_enabled(true); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, + "version:2 distributor:1 storage:4", n_buckets, 4)); + + // Version should not be switched over yet + EXPECT_EQ(1u, current_cluster_state_bundle().getVersion()); + + EXPECT_EQ(0u, mutable_default_dbs_size()); + EXPECT_EQ(0u, mutable_global_dbs_size()); + + EXPECT_FALSE(activate_cluster_state_version(2)); + + EXPECT_EQ(2u, current_cluster_state_bundle().getVersion()); + EXPECT_EQ(n_buckets, mutable_default_dbs_size()); + EXPECT_EQ(n_buckets, mutable_global_dbs_size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) { + set_stale_reads_enabled(true); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, + "version:2 distributor:2 storage:4", n_buckets, 0)); + EXPECT_FALSE(activate_cluster_state_version(2)); + + EXPECT_EQ(0u, read_only_default_dbs_size()); + EXPECT_EQ(0u, read_only_global_dbs_size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) { + set_stale_reads_enabled(true); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, + "version:2 distributor:1 .0.s:d storage:4", n_buckets, 0)); + + // State not yet activated, so read-only DBs have got all the buckets we used to have. 
+ EXPECT_EQ(0u, mutable_default_dbs_size()); + EXPECT_EQ(0u, mutable_global_dbs_size()); + EXPECT_EQ(n_buckets, read_only_default_dbs_size()); + EXPECT_EQ(n_buckets, read_only_global_dbs_size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) { + set_stale_reads_enabled(true); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:4 distributor:1 storage:4", n_buckets, 4, + "version:5 distributor:2 storage:4", n_buckets, 0)); + + EXPECT_TRUE(activate_cluster_state_version(4)); // Too old version + ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5)); + + EXPECT_TRUE(activate_cluster_state_version(6)); // More recent version than what has been observed + ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5)); +} + +TEST_F(TopLevelBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) { + set_stale_reads_enabled(true); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, + "version:2 distributor:1 storage:4", n_buckets, 4)); + // Activate version 2; no pending cluster state after this. + EXPECT_FALSE(activate_cluster_state_version(2)); + + // No pending cluster state for version 3, just passed through to be implicitly bounced by state manager. + // Note: state manager is not modelled in this test, so we just check that the message handler returns + // false (meaning "didn't take message ownership") and there's no auto-generated reply. 
+ EXPECT_FALSE(activate_cluster_state_version(3)); + EXPECT_EQ(size_t(0), _sender.replies().size()); +} + +TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) { + auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d"); + auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m"); + + lib::ClusterStateBundle initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default}, + {FixedBucketSpaces::global_space(), initial_baseline}}); + set_cluster_state_bundle(initial_bundle); + + for (auto* s : distributor_stripes()) { + auto* state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::default_space()); + ASSERT_TRUE(state != nullptr); + EXPECT_EQ(*initial_default, *state); + + state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::global_space()); + ASSERT_TRUE(state != nullptr); + EXPECT_EQ(*initial_baseline, *state); + } + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(*initial_baseline, message_count(1), 0)); + + for (auto* s : distributor_stripes()) { + auto* state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::default_space()); + EXPECT_TRUE(state == nullptr); + + state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::global_space()); + EXPECT_TRUE(state == nullptr); + } +} + +struct BucketDBUpdaterSnapshotTest : TopLevelBucketDBUpdaterTest { + lib::ClusterState empty_state; + std::shared_ptr<lib::ClusterState> initial_baseline; + std::shared_ptr<lib::ClusterState> initial_default; + lib::ClusterStateBundle initial_bundle; + Bucket default_bucket; + Bucket global_bucket; + + BucketDBUpdaterSnapshotTest() + : TopLevelBucketDBUpdaterTest(), + empty_state(), + initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")), + initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 
.0.s:m")), + initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default}, + {FixedBucketSpaces::global_space(), initial_baseline}}), + default_bucket(FixedBucketSpaces::default_space(), BucketId(16, 1234)), + global_bucket(FixedBucketSpaces::global_space(), BucketId(16, 1234)) + { + } + ~BucketDBUpdaterSnapshotTest() override; + + void SetUp() override { + TopLevelBucketDBUpdaterTest::SetUp(); + set_stale_reads_enabled(true); + }; + + // Assumes that the distributor owns all buckets, so it may choose any arbitrary bucket in the bucket space + uint32_t buckets_in_snapshot_matching_current_db(bool check_mutable_repo, BucketSpace bucket_space) { + uint32_t found_buckets = 0; + for (auto* s : distributor_stripes()) { + auto rs = s->bucket_db_updater().read_snapshot_for_bucket(Bucket(bucket_space, BucketId(16, 1234))); + if (!rs.is_routable()) { + return 0; + } + auto guard = rs.steal_read_guard(); + auto& repo = check_mutable_repo ? mutable_repo(*s) : read_only_repo(*s); + for_each_bucket(repo, [&](const auto& space, const auto& entry) { + if (space == bucket_space) { + auto entries = guard->find_parents_and_self(entry.getBucketId()); + if (entries.size() == 1) { + ++found_buckets; + } + } + }); + } + return found_buckets; + } +}; + +BucketDBUpdaterSnapshotTest::~BucketDBUpdaterSnapshotTest() = default; + +TEST_F(BucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) { + auto rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket); + EXPECT_FALSE(rs.is_routable()); +} + +TEST_F(BucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) { + auto rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket); + EXPECT_FALSE(rs.is_routable()); +} + +TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) { + set_cluster_state_bundle(initial_bundle); + // State 
currently pending, empty initial state is active + + auto def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket); + EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), empty_state.toString()); + EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), empty_state.toString()); + ASSERT_TRUE(def_rs.context().has_pending_state_transition()); + EXPECT_EQ(def_rs.context().pending_cluster_state()->toString(), initial_default->toString()); + + auto global_rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket); + EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), empty_state.toString()); + EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), empty_state.toString()); + ASSERT_TRUE(global_rs.context().has_pending_state_transition()); + EXPECT_EQ(global_rs.context().pending_cluster_state()->toString(), initial_baseline->toString()); + + ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(*initial_baseline, message_count(1), 0)); + // State now activated, no pending + + def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket); + EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), initial_default->toString()); + EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), initial_default->toString()); + EXPECT_FALSE(def_rs.context().has_pending_state_transition()); + + global_rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket); + EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), initial_baseline->toString()); + EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), initial_default->toString()); + EXPECT_FALSE(global_rs.context().has_pending_state_transition()); +} + +TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) { + constexpr 
uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, + "version:2 distributor:1 storage:4", n_buckets, 4)); + EXPECT_FALSE(activate_cluster_state_version(2)); + EXPECT_EQ(buckets_in_snapshot_matching_current_db(true, FixedBucketSpaces::default_space()), n_buckets); + EXPECT_EQ(buckets_in_snapshot_matching_current_db(true, FixedBucketSpaces::global_space()), n_buckets); +} + +TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) { + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4, + "version:2 distributor:2 .0.s:d storage:4", 0, 0)); + EXPECT_FALSE(activate_cluster_state_version(2)); + // We're down in state 2 and therefore do not own any buckets + auto def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket); + EXPECT_FALSE(def_rs.is_routable()); +} + +TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) { + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, + "version:2 distributor:2 .0.s:d storage:4", 0, 0)); + EXPECT_EQ(buckets_in_snapshot_matching_current_db(false, FixedBucketSpaces::default_space()), n_buckets); + EXPECT_EQ(buckets_in_snapshot_matching_current_db(false, FixedBucketSpaces::global_space()), n_buckets); +} + +TEST_F(BucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) { + set_stale_reads_enabled(false); + constexpr uint32_t n_buckets = 10; + ASSERT_NO_FATAL_FAILURE( + trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4, + "version:2 distributor:2 .0.s:d storage:4", 0, 0)); + auto def_rs = 
stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket); + EXPECT_FALSE(def_rs.is_routable()); +} + +} diff --git a/storage/src/tests/distributor/top_level_distributor_test.cpp b/storage/src/tests/distributor/top_level_distributor_test.cpp index 348a964dd53..8fae1c6d738 100644 --- a/storage/src/tests/distributor/top_level_distributor_test.cpp +++ b/storage/src/tests/distributor/top_level_distributor_test.cpp @@ -70,18 +70,6 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil { return posted_msgs.str(); } - void tick_distributor_and_stripes_n_times(uint32_t n) { - for (uint32_t i = 0; i < n; ++i) { - tick(false); - } - } - - void tick_top_level_distributor_n_times(uint32_t n) { - for (uint32_t i = 0; i < n; ++i) { - tick(true); - } - } - StatusReporterDelegate& distributor_status_delegate() { return _distributor->_distributorStatusDelegate; } @@ -98,10 +86,6 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil { return _distributor->_status_to_do; } - TopLevelDistributor::MetricUpdateHook distributor_metric_update_hook() { - return _distributor->_metricUpdateHook; - } - BucketSpacesStatsProvider::PerNodeBucketSpacesStats distributor_bucket_spaces_stats() { return _distributor->getBucketSpacesStats(); } @@ -112,10 +96,6 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil { distributor_stripes().front()->db_memory_sample_interval()).count(); } - static std::vector<document::BucketSpace> bucket_spaces() { - return {document::FixedBucketSpaces::default_space(), document::FixedBucketSpaces::global_space()}; - } - size_t explicit_node_state_reply_send_invocations() const noexcept { return _node->getNodeStateUpdater().explicit_node_state_reply_send_invocations(); } @@ -242,8 +222,8 @@ public: TEST_F(TopLevelDistributorTest, tick_aggregates_status_requests_from_all_stripes) { setup_distributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); - 
ASSERT_NE(stripe_of_bucket(document::BucketId(16, 1)), - stripe_of_bucket(document::BucketId(16, 2))); + ASSERT_NE(stripe_index_of_bucket(document::BucketId(16, 1)), + stripe_index_of_bucket(document::BucketId(16, 2))); add_nodes_to_stripe_bucket_db(document::BucketId(16, 1), "0=1/1/1/t"); add_nodes_to_stripe_bucket_db(document::BucketId(16, 2), "0=2/2/2/t"); @@ -521,8 +501,8 @@ TEST_F(TopLevelDistributorTest, leaving_recovery_mode_immediately_sends_getnodes ASSERT_EQ(0, explicit_node_state_reply_send_invocations()); // Add a couple of buckets so we have something to iterate over. 2 buckets // map to the same stripe so we'll need 2 ticks to complete a full scan. - ASSERT_EQ(stripe_of_bucket(document::BucketId(16, 1)), - stripe_of_bucket(document::BucketId(16, 5))); + ASSERT_EQ(stripe_index_of_bucket(document::BucketId(16, 1)), + stripe_index_of_bucket(document::BucketId(16, 5))); add_nodes_to_stripe_bucket_db(document::BucketId(16, 1), "0=1/1/1/t/a"); add_nodes_to_stripe_bucket_db(document::BucketId(16, 2), "0=1/1/1/t/a"); diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp index 0120ca91787..b6e9beb38ae 100644 --- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp +++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp @@ -88,13 +88,13 @@ TopLevelDistributorTestUtil::setup_distributor(int redundancy, } size_t -TopLevelDistributorTestUtil::stripe_of_bucket(const document::BucketId& id) const noexcept +TopLevelDistributorTestUtil::stripe_index_of_bucket(const document::BucketId& id) const noexcept { return stripe_of_bucket_key(id.toKey(), _distributor->_n_stripe_bits); } size_t -TopLevelDistributorTestUtil::stripe_of_bucket(const document::Bucket& bucket) const noexcept +TopLevelDistributorTestUtil::stripe_index_of_bucket(const document::Bucket& bucket) const noexcept { return stripe_of_bucket_key(bucket.getBucketId().toKey(), 
_distributor->_n_stripe_bits); } @@ -177,7 +177,73 @@ TopLevelDistributorTestUtil::add_nodes_to_stripe_bucket_db(const document::Bucke entry->addNodeManual(node); } - stripe_bucket_database(stripe_of_bucket(bucket), bucket.getBucketSpace()).update(entry); + stripe_bucket_database(stripe_index_of_bucket(bucket), bucket.getBucketSpace()).update(entry); +} + +std::string +TopLevelDistributorTestUtil::get_ideal_str(document::BucketId id, const lib::ClusterState& state) +{ + if (!distributor_bucket_space(id).owns_bucket_in_state(state, id)) { + return id.toString(); + } + std::vector<uint16_t> nodes; + _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, nodes); + std::sort(nodes.begin(), nodes.end()); + std::ostringstream ost; + ost << id << ": " << dumpVector(nodes); + return ost.str(); +} + +void +TopLevelDistributorTestUtil::add_ideal_nodes(const lib::ClusterState& state, const document::BucketId& id) +{ + BucketDatabase::Entry entry = get_bucket(id); + + if (!entry.valid()) { + entry = BucketDatabase::Entry(id); + } + + std::vector<uint16_t> res; + assert(_component.get()); + _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, res); + + for (uint32_t i = 0; i < res.size(); ++i) { + if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() != + lib::State::MAINTENANCE) + { + entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)), + toVector<uint16_t>(0)); + } + } + + stripe_bucket_database(stripe_index_of_bucket(id)).update(entry); +} + +void +TopLevelDistributorTestUtil::add_ideal_nodes(const document::BucketId& id) +{ + // TODO STRIPE good way of getting current active cluster state on top-level distributor + // We assume that all stripes have the same cluster state internally, so just use the first. 
+ assert(_distributor->_stripes[0]); + const auto& bundle = _distributor->_stripes[0]->getClusterStateBundle(); + add_ideal_nodes(*bundle.getBaselineClusterState(), id); +} + +std::string +TopLevelDistributorTestUtil::get_nodes(document::BucketId id) +{ + BucketDatabase::Entry entry = get_bucket(id); + + if (!entry.valid()) { + return id.toString(); + } else { + std::vector<uint16_t> nodes = entry->getNodes(); + std::sort(nodes.begin(), nodes.end()); + + std::ostringstream ost; + ost << id << ": " << dumpVector(nodes); + return ost.str(); + } } void @@ -190,13 +256,32 @@ TopLevelDistributorTestUtil::add_nodes_to_stripe_bucket_db(const document::Bucke BucketDatabase::Entry TopLevelDistributorTestUtil::get_bucket(const document::Bucket& bucket) const { - return stripe_bucket_database(stripe_of_bucket(bucket), bucket.getBucketSpace()).get(bucket.getBucketId()); + return stripe_bucket_database(stripe_index_of_bucket(bucket), bucket.getBucketSpace()).get(bucket.getBucketId()); } BucketDatabase::Entry TopLevelDistributorTestUtil::get_bucket(const document::BucketId& bId) const { - return stripe_bucket_database(stripe_of_bucket(bId)).get(bId); + return stripe_bucket_database(stripe_index_of_bucket(bId)).get(bId); +} + +DistributorBucketSpaceRepo& +TopLevelDistributorTestUtil::top_level_bucket_space_repo() noexcept +{ + return _distributor->_component.bucket_space_repo(); +} + +const DistributorBucketSpaceRepo& +TopLevelDistributorTestUtil::top_level_bucket_space_repo() const noexcept +{ + return _distributor->_component.bucket_space_repo(); +} + +std::unique_ptr<StripeAccessGuard> +TopLevelDistributorTestUtil::acquire_stripe_guard() +{ + // Note: this won't actually interact with any threads, as the pool is running in single-threaded test mode. 
+ return _distributor->_stripe_accessor->rendezvous_and_hold_all(); } TopLevelBucketDBUpdater& @@ -223,9 +308,40 @@ TopLevelDistributorTestUtil::node_context() const { return _distributor->distributor_component(); } -storage::distributor::DistributorStripeOperationContext& -TopLevelDistributorTestUtil::operation_context() { - return _distributor->distributor_component(); +DistributorBucketSpace& +TopLevelDistributorTestUtil::distributor_bucket_space(const document::BucketId& id) +{ + return stripe_of_bucket(id).getBucketSpaceRepo().get(makeBucketSpace()); +} + +const DistributorBucketSpace& +TopLevelDistributorTestUtil::distributor_bucket_space(const document::BucketId& id) const +{ + return stripe_of_bucket(id).getBucketSpaceRepo().get(makeBucketSpace()); +} + +DistributorStripe& +TopLevelDistributorTestUtil::stripe_of_bucket(const document::BucketId& id) noexcept +{ + return *_distributor->_stripes[stripe_index_of_bucket(id)]; +} + +const DistributorStripe& +TopLevelDistributorTestUtil::stripe_of_bucket(const document::BucketId& id) const noexcept +{ + return *_distributor->_stripes[stripe_index_of_bucket(id)]; +} + +DistributorStripe& +TopLevelDistributorTestUtil::stripe_of_bucket(const document::Bucket& bucket) noexcept +{ + return *_distributor->_stripes[stripe_index_of_bucket(bucket.getBucketId())]; +} + +const DistributorStripe& +TopLevelDistributorTestUtil::stripe_of_bucket(const document::Bucket& bucket) const noexcept +{ + return *_distributor->_stripes[stripe_index_of_bucket(bucket.getBucketId())]; } bool @@ -259,6 +375,11 @@ TopLevelDistributorTestUtil::reconfigure(const DistributorConfig& cfg) tick(); // Config is propagated upon next top-level tick } +framework::MetricUpdateHook& +TopLevelDistributorTestUtil::distributor_metric_update_hook() { + return _distributor->_metricUpdateHook; +} + BucketDatabase& TopLevelDistributorTestUtil::stripe_bucket_database(uint16_t stripe_idx) { assert(stripe_idx < _distributor->_stripes.size()); @@ -319,4 +440,55 @@ 
TopLevelDistributorTestUtil::enable_distributor_cluster_state(const lib::Cluster bucket_db_updater().simulate_cluster_state_bundle_activation(state); } +std::vector<document::BucketSpace> +TopLevelDistributorTestUtil::bucket_spaces() +{ + return {document::FixedBucketSpaces::default_space(), document::FixedBucketSpaces::global_space()}; +} + +void +TopLevelDistributorTestUtil::trigger_distribution_change(std::shared_ptr<lib::Distribution> distr) +{ + _node->getComponentRegister().setDistribution(std::move(distr)); + _distributor->storageDistributionChanged(); + _distributor->enableNextDistribution(); +} + +const lib::ClusterStateBundle& +TopLevelDistributorTestUtil::current_cluster_state_bundle() const +{ + // We assume that all stripes have the same cluster state internally, so just use the first. + assert(_distributor->_stripes[0]); + const auto& bundle = _distributor->_stripes[0]->getClusterStateBundle(); + // ... but sanity-check just to make sure... + for (size_t i = 1; i < _num_distributor_stripes; ++i) { + assert(_distributor->_stripes[i]->getClusterStateBundle() == bundle); + } + return bundle; +} + +void +TopLevelDistributorTestUtil::tick_distributor_and_stripes_n_times(uint32_t n) +{ + for (uint32_t i = 0; i < n; ++i) { + tick(false); + } +} + +void +TopLevelDistributorTestUtil::tick_top_level_distributor_n_times(uint32_t n) +{ + for (uint32_t i = 0; i < n; ++i) { + tick(true); + } +} + +void +TopLevelDistributorTestUtil::complete_recovery_mode_on_all_stripes() +{ + for (auto* s : distributor_stripes()) { + s->scanAllBuckets(); + } +} + } diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.h b/storage/src/tests/distributor/top_level_distributor_test_util.h index b12e9dff304..8832f8ada6e 100644 --- a/storage/src/tests/distributor/top_level_distributor_test_util.h +++ b/storage/src/tests/distributor/top_level_distributor_test_util.h @@ -18,12 +18,15 @@ namespace framework { struct TickingThreadPool; } namespace distributor { class 
TopLevelDistributor; +class DistributorBucketSpace; +class DistributorBucketSpaceRepo; class DistributorMetricSet; class DistributorNodeContext; class DistributorStripe; class DistributorStripeComponent; class DistributorStripeOperationContext; class DistributorStripePool; +class StripeAccessGuard; class IdealStateMetricSet; class Operation; class TopLevelBucketDBUpdater; @@ -38,8 +41,13 @@ public: void close(); - size_t stripe_of_bucket(const document::BucketId& id) const noexcept; - size_t stripe_of_bucket(const document::Bucket& bucket) const noexcept; + size_t stripe_index_of_bucket(const document::BucketId& id) const noexcept; + size_t stripe_index_of_bucket(const document::Bucket& bucket) const noexcept; + + DistributorStripe& stripe_of_bucket(const document::BucketId& id) noexcept; + const DistributorStripe& stripe_of_bucket(const document::BucketId& id) const noexcept; + DistributorStripe& stripe_of_bucket(const document::Bucket& bucket) noexcept; + const DistributorStripe& stripe_of_bucket(const document::Bucket& bucket) const noexcept; /** * Parses the given string to a set of node => bucket info data, @@ -52,11 +60,19 @@ public: // As the above, but always inserts into default bucket space void add_nodes_to_stripe_bucket_db(const document::BucketId& id, const std::string& nodeStr); + // TODO STRIPE replace with BucketSpaceStateMap once legacy is gone + DistributorBucketSpaceRepo& top_level_bucket_space_repo() noexcept; + const DistributorBucketSpaceRepo& top_level_bucket_space_repo() const noexcept; + + std::unique_ptr<StripeAccessGuard> acquire_stripe_guard(); + TopLevelBucketDBUpdater& bucket_db_updater(); const IdealStateMetricSet& total_ideal_state_metrics() const; const DistributorMetricSet& total_distributor_metrics() const; const storage::distributor::DistributorNodeContext& node_context() const; - storage::distributor::DistributorStripeOperationContext& operation_context(); + + DistributorBucketSpace& distributor_bucket_space(const 
document::BucketId& id); + const DistributorBucketSpace& distributor_bucket_space(const document::BucketId& id) const; std::vector<DistributorStripe*> distributor_stripes() const; @@ -69,12 +85,19 @@ public: return _node->getClock(); } + framework::MetricUpdateHook& distributor_metric_update_hook(); + BucketDatabase& stripe_bucket_database(uint16_t stripe_idx); // Implicit default space only BucketDatabase& stripe_bucket_database(uint16_t stripe_idx, document::BucketSpace space); const BucketDatabase& stripe_bucket_database(uint16_t stripe_idx) const; // Implicit default space only const BucketDatabase& stripe_bucket_database(uint16_t stripe_idx, document::BucketSpace space) const; [[nodiscard]] bool all_distributor_stripes_are_in_recovery_mode() const; + void tick_distributor_and_stripes_n_times(uint32_t n); + void tick_top_level_distributor_n_times(uint32_t n); + + void complete_recovery_mode_on_all_stripes(); + void setup_distributor(int redundancy, int node_count, const std::string& systemState, @@ -93,6 +116,17 @@ public: // Gets bucket entry from default space only BucketDatabase::Entry get_bucket(const document::BucketId& bId) const; + std::string get_ideal_str(document::BucketId id, const lib::ClusterState& state); + + void add_ideal_nodes(const lib::ClusterState& state, const document::BucketId& id); + void add_ideal_nodes(const document::BucketId& id); + + /** + * Returns a string with the nodes currently stored in the bucket + * database for the given bucket. 
+ */ + std::string get_nodes(document::BucketId id); + DistributorMessageSenderStub& sender() noexcept { return _sender; } const DistributorMessageSenderStub& sender() const noexcept { return _sender; } @@ -101,6 +135,12 @@ public: void receive_set_system_state_command(const vespalib::string& state_str); bool handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg); + void trigger_distribution_change(std::shared_ptr<lib::Distribution> distr); + + const lib::ClusterStateBundle& current_cluster_state_bundle() const; + + static std::vector<document::BucketSpace> bucket_spaces(); + protected: vdstestlib::DirConfig _config; std::unique_ptr<TestDistributorApp> _node; diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp index 1050f511ad6..1a9cb9f303c 100644 --- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp +++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp @@ -55,7 +55,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg, _operationOwner(*this, _component.getClock()), _maintenanceOperationOwner(*this, _component.getClock()), _operation_sequencer(std::make_unique<OperationSequencer>()), - _pendingMessageTracker(compReg), + _pendingMessageTracker(compReg, stripe_index), _bucketDBUpdater(_component, _component, *this, *this, use_legacy_mode), _distributorStatusDelegate(compReg, *this, *this), _bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater), @@ -985,6 +985,7 @@ void DistributorStripe::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state, bool has_bucket_ownership_change) { + assert(!_use_legacy_mode); // TODO STRIPE replace legacy func enableClusterStateBundle(new_state); if (has_bucket_ownership_change) { @@ -995,6 +996,7 @@ DistributorStripe::enable_cluster_state_bundle(const lib::ClusterStateBundle& ne const auto now = 
TimePoint(std::chrono::milliseconds(_component.getClock().getTimeInMillis().getTime())); _externalOperationHandler.rejectFeedBeforeTimeReached(_ownershipSafeTimeCalc->safeTimePoint(now)); } + _bucketDBUpdater.handle_activated_cluster_state_bundle(); // Triggers resending of queued requests } void diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h index 5426d311558..b1b20cf445a 100644 --- a/storage/src/vespa/storage/distributor/distributor_stripe.h +++ b/storage/src/vespa/storage/distributor/distributor_stripe.h @@ -198,6 +198,7 @@ private: friend class TopLevelDistributor; friend class DistributorStripeTestUtil; friend class DistributorTestUtil; + friend class TopLevelDistributorTestUtil; friend class LegacyBucketDBUpdaterTest; friend class MetricUpdateHook; friend class MultiThreadedStripeAccessGuard; diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp index cb76b6d50b2..e4554b793b8 100644 --- a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp +++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "pendingmessagetracker.h" #include <vespa/vespalib/stllike/asciistream.h> +#include <vespa/vespalib/util/stringfmt.h> #include <map> #include <algorithm> @@ -9,11 +10,13 @@ LOG_SETUP(".pendingmessages"); namespace storage::distributor { -PendingMessageTracker::PendingMessageTracker(framework::ComponentRegister& cr) - : framework::HtmlStatusReporter("pendingmessages", "Pending messages to storage nodes"), +PendingMessageTracker::PendingMessageTracker(framework::ComponentRegister& cr, uint32_t stripe_index) + : framework::HtmlStatusReporter(vespalib::make_string("pendingmessages%u", stripe_index), + vespalib::make_string("Pending messages to storage nodes (stripe %u)", stripe_index)), _component(cr, "pendingmessagetracker"), _nodeInfo(_component.getClock()), _nodeBusyDuration(60), + _deferred_read_tasks(), _lock() { _component.registerStatusPage(*this); diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.h b/storage/src/vespa/storage/distributor/pendingmessagetracker.h index 13d83157150..ac30304ca9c 100644 --- a/storage/src/vespa/storage/distributor/pendingmessagetracker.h +++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.h @@ -77,7 +77,7 @@ public: */ using TimePoint = std::chrono::milliseconds; - explicit PendingMessageTracker(framework::ComponentRegister&); + explicit PendingMessageTracker(framework::ComponentRegister&, uint32_t stripe_index); ~PendingMessageTracker() override; void insert(const std::shared_ptr<api::StorageMessage>&); diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp index 06a9672ba50..c48434484d2 100644 --- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp +++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp @@ -170,6 +170,12 @@ StripeBucketDBUpdater::recheckBucketInfo(uint32_t nodeIdx, sendRequestBucketInfo(nodeIdx, bucket, std::shared_ptr<MergeReplyGuard>()); } 
+void +StripeBucketDBUpdater::handle_activated_cluster_state_bundle() +{ + sendAllQueuedBucketRechecks(); +} + namespace { class ReadOnlyDbMergingInserter : public BucketDatabase::MergingProcessor { diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h index 1456308c3d0..9bc91ca78e7 100644 --- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h +++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h @@ -46,6 +46,7 @@ public: void flush(); const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const; void recheckBucketInfo(uint32_t nodeIdx, const document::Bucket& bucket); + void handle_activated_cluster_state_bundle(); bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>& cmd) override; bool onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd) override; diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.cpp b/storage/src/vespa/storage/distributor/top_level_distributor.cpp index 68a52e27d84..bbef20b2a23 100644 --- a/storage/src/vespa/storage/distributor/top_level_distributor.cpp +++ b/storage/src/vespa/storage/distributor/top_level_distributor.cpp @@ -58,6 +58,7 @@ TopLevelDistributor::TopLevelDistributor(DistributorComponentRegister& compReg, ChainedMessageSender* messageSender) : StorageLink("distributor"), framework::StatusReporter("distributor", "Distributor"), + _node_identity(node_identity), _comp_reg(compReg), _use_legacy_mode(num_distributor_stripes == 0), _metrics(std::make_shared<DistributorMetricSet>()), diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.h b/storage/src/vespa/storage/distributor/top_level_distributor.h index 57ff5268323..5de4b9c1aaa 100644 --- a/storage/src/vespa/storage/distributor/top_level_distributor.h +++ b/storage/src/vespa/storage/distributor/top_level_distributor.h @@ -20,6 +20,7 
@@ #include <vespa/storage/common/distributorcomponent.h> #include <vespa/storage/common/doneinitializehandler.h> #include <vespa/storage/common/messagesender.h> +#include <vespa/storage/common/node_identity.h> #include <vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h> #include <vespa/storage/distributor/maintenance/maintenancescheduler.h> #include <vespa/storageapi/message/state.h> @@ -33,7 +34,6 @@ namespace storage { struct DoneInitializeHandler; class HostInfo; - class NodeIdentity; } namespace storage::distributor { @@ -84,6 +84,8 @@ public: DistributorMetricSet& getMetrics(); + const NodeIdentity& node_identity() const noexcept { return _node_identity; } + // Implements DistributorInterface and DistributorMessageSender. DistributorMetricSet& metrics() override { return getMetrics(); } const DistributorConfiguration& config() const override; @@ -205,6 +207,7 @@ private: using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>; + const NodeIdentity _node_identity; DistributorComponentRegister& _comp_reg; const bool _use_legacy_mode; std::shared_ptr<DistributorMetricSet> _metrics; diff --git a/storage/src/vespa/storage/storageserver/distributornode.cpp b/storage/src/vespa/storage/storageserver/distributornode.cpp index 0d129ffdb28..620bd3571ce 100644 --- a/storage/src/vespa/storage/storageserver/distributornode.cpp +++ b/storage/src/vespa/storage/storageserver/distributornode.cpp @@ -26,7 +26,9 @@ DistributorNode::DistributorNode( : StorageNode(configUri, context, generationFetcher, std::make_unique<HostInfo>(), !communicationManager ? NORMAL : SINGLE_THREADED_TEST_MODE), - _threadPool(framework::TickingThreadPool::createDefault("distributor")), + // TODO STRIPE: Change waitTime default to 100ms when legacy mode is removed. + _threadPool(framework::TickingThreadPool::createDefault("distributor", + (num_distributor_stripes > 0) ? 
100ms : 5ms)), _stripe_pool(std::make_unique<distributor::DistributorStripePool>()), _context(context), _timestamp_mutex(), diff --git a/storageframework/src/vespa/storageframework/generic/thread/tickingthread.h b/storageframework/src/vespa/storageframework/generic/thread/tickingthread.h index 0649d914c75..c80ba0e8945 100644 --- a/storageframework/src/vespa/storageframework/generic/thread/tickingthread.h +++ b/storageframework/src/vespa/storageframework/generic/thread/tickingthread.h @@ -78,6 +78,7 @@ struct ThreadLock { struct TickingThreadPool : public ThreadLock { using UP = std::unique_ptr<TickingThreadPool>; + // TODO STRIPE: Change waitTime default to 100ms when legacy mode is removed. static TickingThreadPool::UP createDefault( vespalib::stringref name, vespalib::duration waitTime = 5ms, diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java index 89b72c249bd..54f2b2fd9e3 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java @@ -1,6 +1,7 @@ // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.athenz.client.zms; +import com.yahoo.io.IOUtils; import com.yahoo.vespa.athenz.api.AthenzDomain; import com.yahoo.vespa.athenz.api.AthenzGroup; import com.yahoo.vespa.athenz.api.AthenzIdentity; @@ -18,6 +19,7 @@ import com.yahoo.vespa.athenz.client.zms.bindings.DomainListResponseEntity; import com.yahoo.vespa.athenz.client.zms.bindings.MembershipEntity; import com.yahoo.vespa.athenz.client.zms.bindings.PolicyEntity; import com.yahoo.vespa.athenz.client.zms.bindings.ProviderResourceGroupRolesRequestEntity; +import com.yahoo.vespa.athenz.client.zms.bindings.ResponseListEntity; import com.yahoo.vespa.athenz.client.zms.bindings.RoleEntity; import com.yahoo.vespa.athenz.client.zms.bindings.ServiceEntity; import com.yahoo.vespa.athenz.client.zms.bindings.ServiceListResponseEntity; @@ -27,13 +29,19 @@ import com.yahoo.vespa.athenz.utils.AthenzIdentities; import org.apache.http.Header; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.methods.RequestBuilder; +import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import javax.net.ssl.SSLContext; +import java.io.IOException; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.time.Instant; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.Set; @@ -183,6 +191,17 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient { } @Override + public void createPolicy(AthenzDomain athenzDomain, String athenzPolicy) { + URI uri = zmsUrl.resolve(String.format("domain/%s/policy/%s", + athenzDomain.getName(), athenzPolicy)); + StringEntity entity = toJsonStringEntity(Map.of("name", athenzPolicy, "assertions", List.of())); + HttpUriRequest request = RequestBuilder.put(uri) + .setEntity(entity) + .build(); + execute(request, response -> readEntity(response, 
Void.class)); + } + + @Override public void addPolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, AthenzResourceName resourceName, AthenzRole athenzRole) { URI uri = zmsUrl.resolve(String.format("domain/%s/policy/%s/assertion", athenzDomain.getName(), athenzPolicy)); @@ -224,19 +243,19 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient { } @Override - public List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole) { + public Map<AthenzUser, String> listPendingRoleApprovals(AthenzRole athenzRole) { URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s?pending=true", athenzRole.domain().getName(), athenzRole.roleName())); HttpUriRequest request = RequestBuilder.get() .setUri(uri) .build(); RoleEntity roleEntity = execute(request, response -> readEntity(response, RoleEntity.class)); + return roleEntity.roleMembers().stream() .filter(RoleEntity.Member::pendingApproval) - .map(RoleEntity.Member::memberName) - .map(AthenzIdentities::from) - .filter(identity -> AthenzIdentities.USER_PRINCIPAL_DOMAIN.equals(identity.getDomain())) - .map(AthenzUser.class::cast) - .collect(Collectors.toList()); + .filter(re -> AthenzIdentities.USER_PRINCIPAL_DOMAIN.equals(AthenzIdentities.from(re.memberName()).getDomain())) + .collect(Collectors.toUnmodifiableMap( + m -> (AthenzUser) AthenzIdentities.from(m.memberName()), + RoleEntity.Member::auditRef)); } @Override @@ -289,6 +308,33 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient { execute(RequestBuilder.delete(uri).build(), response -> readEntity(response, Void.class)); } + @Override + public void createRole(AthenzRole role, Map<String, Object> attributes) { + URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s", role.domain().getName(), role.roleName())); + HashMap<String, Object> finalAttributes = new HashMap<>(attributes); + finalAttributes.put("name", role.roleName()); + var request = RequestBuilder.put(uri) + 
.setEntity(toJsonStringEntity(finalAttributes)) + .build(); + execute(request, response -> readEntity(response, Void.class)); + } + + @Override + public Set<AthenzRole> listRoles(AthenzDomain domain) { + var uri = zmsUrl.resolve(String.format("domain/%s/role", domain.getName())); + ResponseListEntity listResponse = execute(RequestBuilder.get(uri).build(), response -> readEntity(response, ResponseListEntity.class)); + return listResponse.entity.stream() + .map(name -> new AthenzRole(domain, name)) + .collect(Collectors.toSet()); + } + + @Override + public Set<String> listPolicies(AthenzDomain domain) { + var uri = zmsUrl.resolve(String.format("domain/%s/policy", domain.getName())); + ResponseListEntity listResponse = execute(RequestBuilder.get(uri).build(), response -> readEntity(response, ResponseListEntity.class)); + return Set.copyOf(listResponse.entity); + } + private static Header createCookieHeaderWithOktaTokens(OktaIdentityToken identityToken, OktaAccessToken accessToken) { return new BasicHeader("Cookie", String.format("okta_at=%s; okta_it=%s", accessToken.token(), identityToken.token())); } diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java index 2807d20f5c6..2fd1cea0e50 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java @@ -13,6 +13,7 @@ import com.yahoo.vespa.athenz.api.OktaIdentityToken; import java.time.Instant; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; @@ -45,11 +46,13 @@ public interface ZmsClient extends AutoCloseable { boolean hasAccess(AthenzResourceName resource, String action, AthenzIdentity identity); + void createPolicy(AthenzDomain athenzDomain, String athenzPolicy); + void addPolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, 
AthenzResourceName resourceName, AthenzRole athenzRole); boolean deletePolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, AthenzResourceName resourceName, AthenzRole athenzRole); - List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole); + Map<AthenzUser, String> listPendingRoleApprovals(AthenzRole athenzRole); void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry); @@ -61,5 +64,11 @@ public interface ZmsClient extends AutoCloseable { void deleteService(AthenzService athenzService); + void createRole(AthenzRole role, Map<String, Object> properties); + + Set<AthenzRole> listRoles(AthenzDomain domain); + + Set<String> listPolicies(AthenzDomain domain); + void close(); } diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/ResponseListEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/ResponseListEntity.java new file mode 100644 index 00000000000..fa64430cd11 --- /dev/null +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/ResponseListEntity.java @@ -0,0 +1,19 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +package com.yahoo.vespa.athenz.client.zms.bindings; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.List; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class ResponseListEntity { + public final List<String> entity; + + @JsonCreator + public ResponseListEntity(@JsonProperty("names") List<String> entity) { + this.entity = entity; + } +} diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java index 5babe292138..537fa1fe50a 100644 --- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java +++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java @@ -35,12 +35,14 @@ public class RoleEntity { private final String memberName; private final boolean active; private final boolean approved; + private final String auditRef; @JsonCreator - public Member(@JsonProperty("memberName") String memberName, @JsonProperty("active") boolean active, @JsonProperty("approved") boolean approved) { + public Member(@JsonProperty("memberName") String memberName, @JsonProperty("active") boolean active, @JsonProperty("approved") boolean approved, @JsonProperty("auditRef") String auditRef) { this.memberName = memberName; this.active = active; this.approved = approved; + this.auditRef = auditRef; } public String memberName() { @@ -50,5 +52,9 @@ public class RoleEntity { public boolean pendingApproval() { return !approved; } + + public String auditRef() { + return auditRef; + } } } |