X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/24f140f9ed1a2180541c0c7cebf7572c5155fe27..398fa5a29196a5622f8aa6c2edddc76a41c10773:/services/keep-balance/balance_run_test.go

diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
index 0d1b6b5912..81e4c7b867 100644
--- a/services/keep-balance/balance_run_test.go
+++ b/services/keep-balance/balance_run_test.go
@@ -2,10 +2,10 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepbalance
 
 import (
-	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -15,6 +15,7 @@ import (
 	"os"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 
 	"git.arvados.org/arvados.git/lib/config"
@@ -23,7 +24,6 @@ import (
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"github.com/jmoiron/sqlx"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/expfmt"
 	check "gopkg.in/check.v1"
 )
 
@@ -90,21 +90,29 @@ var stubMounts = map[string][]arvados.KeepMount{
 		UUID:           "zzzzz-ivpuk-000000000000000",
 		DeviceID:       "keep0-vol0",
 		StorageClasses: map[string]bool{"default": true},
+		AllowWrite:     true,
+		AllowTrash:     true,
 	}},
 	"keep1.zzzzz.arvadosapi.com:25107": {{
 		UUID:           "zzzzz-ivpuk-100000000000000",
 		DeviceID:       "keep1-vol0",
 		StorageClasses: map[string]bool{"default": true},
+		AllowWrite:     true,
+		AllowTrash:     true,
 	}},
 	"keep2.zzzzz.arvadosapi.com:25107": {{
 		UUID:           "zzzzz-ivpuk-200000000000000",
 		DeviceID:       "keep2-vol0",
 		StorageClasses: map[string]bool{"default": true},
+		AllowWrite:     true,
+		AllowTrash:     true,
 	}},
 	"keep3.zzzzz.arvadosapi.com:25107": {{
 		UUID:           "zzzzz-ivpuk-300000000000000",
 		DeviceID:       "keep3-vol0",
 		StorageClasses: map[string]bool{"default": true},
+		AllowWrite:     true,
+		AllowTrash:     true,
 	}},
 }
 
@@ -255,26 +263,32 @@ func (s *stubServer) serveKeepstoreMounts() *reqTracker {
 }
 
 func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
+	fooLine := func(mt int) string { return fmt.Sprintf("acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+mt) }
+	barLine := "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n"
 	rt := &reqTracker{}
 	s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
 		count := rt.Add(r)
-		if r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
-			io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+		if r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(barLine, r.URL.Path[7:]) {
+			io.WriteString(w, barLine)
 		}
-		fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n\n", 12345678+count)
+		if strings.HasPrefix(fooLine(count), r.URL.Path[7:]) {
+			io.WriteString(w, fooLine(count))
+		}
+		io.WriteString(w, "\n")
 	})
 	for _, mounts := range stubMounts {
 		for i, mnt := range mounts {
 			i := i
 			s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
 				count := rt.Add(r)
-				if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
-					io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+				r.ParseForm()
+				if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(barLine, r.Form.Get("prefix")) {
+					io.WriteString(w, barLine)
 				}
-				if i == 0 {
-					fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+count)
+				if i == 0 && strings.HasPrefix(fooLine(count), r.Form.Get("prefix")) {
+					io.WriteString(w, fooLine(count))
 				}
-				fmt.Fprintf(w, "\n")
+				io.WriteString(w, "\n")
 			})
 		}
 	}
@@ -282,21 +296,44 @@
 }
 
 func (s *stubServer) serveKeepstoreIndexFoo1() *reqTracker {
+	fooLine := "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n"
 	rt := &reqTracker{}
 	s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
 		rt.Add(r)
-		io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
+		if r.Host == "keep0.zzzzz.arvadosapi.com:25107" && strings.HasPrefix(fooLine, r.URL.Path[7:]) {
+			io.WriteString(w, fooLine)
+		}
+		io.WriteString(w, "\n")
 	})
 	for _, mounts := range stubMounts {
 		for i, mnt := range mounts {
 			i := i
 			s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
 				rt.Add(r)
-				if i == 0 {
-					io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
-				} else {
-					io.WriteString(w, "\n")
+				if i == 0 && strings.HasPrefix(fooLine, r.Form.Get("prefix")) {
+					io.WriteString(w, fooLine)
 				}
+				io.WriteString(w, "\n")
+			})
+		}
+	}
+	return rt
+}
+
+func (s *stubServer) serveKeepstoreIndexIgnoringPrefix() *reqTracker {
+	fooLine := "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n"
+	rt := &reqTracker{}
+	s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
+		rt.Add(r)
+		io.WriteString(w, fooLine)
+		io.WriteString(w, "\n")
+	})
+	for _, mounts := range stubMounts {
+		for _, mnt := range mounts {
+			s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+				rt.Add(r)
+				io.WriteString(w, fooLine)
+				io.WriteString(w, "\n")
 			})
 		}
 	}
@@ -360,9 +397,7 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
 	_, err := s.db.Exec(`delete from collections`)
 	c.Assert(err, check.IsNil)
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
-		Logger:      ctxlog.TestLogger(c),
+		Logger: ctxlog.TestLogger(c),
 	}
 	s.stub.serveCurrentUserAdmin()
 	s.stub.serveZeroCollections()
@@ -372,18 +407,37 @@
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
-	_, err = srv.runOnce()
+	_, err = srv.runOnce(context.Background())
 	c.Check(err, check.ErrorMatches, "received zero collections")
 	c.Check(trashReqs.Count(), check.Equals, 4)
 	c.Check(pullReqs.Count(), check.Equals, 0)
 }
 
-func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
+func (s *runSuite) TestRefuseBadIndex(c *check.C) {
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
+		ChunkPrefix: "abc",
 		Logger:      ctxlog.TestLogger(c),
 	}
+	s.stub.serveCurrentUserAdmin()
+	s.stub.serveFooBarFileCollections()
+	s.stub.serveKeepServices(stubServices)
+	s.stub.serveKeepstoreMounts()
+	s.stub.serveKeepstoreIndexIgnoringPrefix()
+	trashReqs := s.stub.serveKeepstoreTrash()
+	pullReqs := s.stub.serveKeepstorePull()
+	srv := s.newServer(&opts)
+	bal, err := srv.runOnce(context.Background())
+	c.Check(err, check.ErrorMatches, ".*Index response included block .* despite asking for prefix \"abc\"")
+	c.Check(trashReqs.Count(), check.Equals, 4)
+	c.Check(pullReqs.Count(), check.Equals, 0)
+	c.Check(bal.stats.trashes, check.Equals, 0)
+	c.Check(bal.stats.pulls, check.Equals, 0)
+}
+
+func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
+	opts := RunOptions{
+		Logger: ctxlog.TestLogger(c),
+	}
 	s.stub.serveCurrentUserNotAdmin()
 	s.stub.serveZeroCollections()
 	s.stub.serveKeepServices(stubServices)
@@ -391,17 +445,44 @@
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
-	_, err := srv.runOnce()
+	_, err := srv.runOnce(context.Background())
 	c.Check(err, check.ErrorMatches, "current user .* is not .* admin user")
 	c.Check(trashReqs.Count(), check.Equals, 0)
 	c.Check(pullReqs.Count(), check.Equals, 0)
 }
 
+func (s *runSuite) TestInvalidChunkPrefix(c *check.C) {
+	for _, trial := range []struct {
+		prefix string
+		errRe  string
+	}{
+		{"123ABC", "invalid char \"A\" in chunk prefix.*"},
+		{"123xyz", "invalid char \"x\" in chunk prefix.*"},
+		{"123456789012345678901234567890123", "invalid chunk prefix .* longer than a block hash"},
+	} {
+		s.SetUpTest(c)
+		c.Logf("trying invalid prefix %q", trial.prefix)
+		opts := RunOptions{
+			ChunkPrefix: trial.prefix,
+			Logger:      ctxlog.TestLogger(c),
+		}
+		s.stub.serveCurrentUserAdmin()
+		s.stub.serveFooBarFileCollections()
+		s.stub.serveKeepServices(stubServices)
+		s.stub.serveKeepstoreMounts()
+		trashReqs := s.stub.serveKeepstoreTrash()
+		pullReqs := s.stub.serveKeepstorePull()
+		srv := s.newServer(&opts)
+		_, err := srv.runOnce(context.Background())
+		c.Check(err, check.ErrorMatches, trial.errRe)
+		c.Check(trashReqs.Count(), check.Equals, 0)
+		c.Check(pullReqs.Count(), check.Equals, 0)
+	}
+}
+
 func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
-		Logger:      ctxlog.TestLogger(c),
+		Logger: ctxlog.TestLogger(c),
 	}
 	s.stub.serveCurrentUserAdmin()
 	s.stub.serveZeroCollections()
@@ -417,7 +498,7 @@
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
-	_, err := srv.runOnce()
+	_, err := srv.runOnce(context.Background())
 	c.Check(err, check.ErrorMatches, "cannot continue with config errors.*")
 	c.Check(trashReqs.Count(), check.Equals, 0)
 	c.Check(pullReqs.Count(), check.Equals, 0)
@@ -429,9 +510,7 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
 	s.config.Collections.BlobMissingReport = lostf.Name()
 	defer os.Remove(lostf.Name())
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
-		Logger:      ctxlog.TestLogger(c),
+		Logger: ctxlog.TestLogger(c),
 	}
 	s.stub.serveCurrentUserAdmin()
 	s.stub.serveFooBarFileCollections()
@@ -442,7 +521,7 @@
 	s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
 	c.Assert(err, check.IsNil)
-	_, err = srv.runOnce()
+	_, err = srv.runOnce(context.Background())
 	c.Check(err, check.IsNil)
 	lost, err := ioutil.ReadFile(lostf.Name())
 	c.Assert(err, check.IsNil)
@@ -450,10 +529,10 @@
 }
 
 func (s *runSuite) TestDryRun(c *check.C) {
+	s.config.Collections.BalanceTrashLimit = 0
+	s.config.Collections.BalancePullLimit = 0
 	opts := RunOptions{
-		CommitPulls: false,
-		CommitTrash: false,
-		Logger:      ctxlog.TestLogger(c),
+		Logger: ctxlog.TestLogger(c),
 	}
 	s.stub.serveCurrentUserAdmin()
 	collReqs := s.stub.serveFooBarFileCollections()
@@ -463,7 +542,7 @@
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
-	bal, err := srv.runOnce()
+	bal, err := srv.runOnce(context.Background())
 	c.Check(err, check.IsNil)
 	for _, req := range collReqs.reqs {
 		c.Check(req.Form.Get("include_trash"), check.Equals, "true")
@@ -471,19 +550,24 @@
 	}
 	c.Check(trashReqs.Count(), check.Equals, 0)
 	c.Check(pullReqs.Count(), check.Equals, 0)
-	c.Check(bal.stats.pulls, check.Not(check.Equals), 0)
+	c.Check(bal.stats.pulls, check.Equals, 0)
+	c.Check(bal.stats.pullsDeferred, check.Not(check.Equals), 0)
+	c.Check(bal.stats.trashes, check.Equals, 0)
+	c.Check(bal.stats.trashesDeferred, check.Not(check.Equals), 0)
 	c.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)
 	c.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)
+
+	metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_trash_entries_deferred_count [1-9].*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_pull_entries_deferred_count [1-9].*`)
 }
 
 func (s *runSuite) TestCommit(c *check.C) {
 	s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
 	s.config.ManagementToken = "xyzzy"
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
-		Logger:      ctxlog.TestLogger(c),
-		Dumper:      ctxlog.TestLogger(c),
+		Logger: ctxlog.TestLogger(c),
+		Dumper: ctxlog.TestLogger(c),
 	}
 	s.stub.serveCurrentUserAdmin()
 	s.stub.serveFooBarFileCollections()
@@ -493,7 +577,7 @@
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
 	srv := s.newServer(&opts)
-	bal, err := srv.runOnce()
+	bal, err := srv.runOnce(context.Background())
 	c.Check(err, check.IsNil)
 	c.Check(trashReqs.Count(), check.Equals, 8)
 	c.Check(pullReqs.Count(), check.Equals, 4)
@@ -507,21 +591,48 @@
 	c.Assert(err, check.IsNil)
 	c.Check(string(lost), check.Not(check.Matches), `(?ms).*acbd18db4cc2f85cedef654fccc4a4d8.*`)
 
-	buf, err := s.getMetrics(c, srv)
-	c.Check(err, check.IsNil)
-	bufstr := buf.String()
-	c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
-	c.Check(bufstr, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
-	c.Check(bufstr, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 1\n.*`)
-	c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_dedup_byte_ratio [1-9].*`)
-	c.Check(bufstr, check.Matches, `(?ms).*\narvados_keep_dedup_block_ratio [1-9].*`)
+	metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 1\n.*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_byte_ratio [1-9].*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_dedup_block_ratio [1-9].*`)
+
+	for _, cat := range []string{
+		"dedup_byte_ratio", "dedup_block_ratio", "collection_bytes",
+		"referenced_bytes", "referenced_blocks", "reference_count",
+		"pull_entries_sent_count",
+		"trash_entries_sent_count",
+	} {
+		c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+` [1-9].*`)
+	}
+
+	for _, cat := range []string{
+		"pull_entries_deferred_count",
+		"trash_entries_deferred_count",
+	} {
+		c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+` 0\n.*`)
+	}
+
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="0"} [1-9].*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="1"} [1-9].*`)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_replicated_block_count{replicas="9"} 0\n.*`)
+
+	for _, sub := range []string{"replicas", "blocks", "bytes"} {
+		for _, cat := range []string{"needed", "unneeded", "unachievable", "pulling"} {
+			c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_usage_`+sub+`{status="`+cat+`",storage_class="default"} [1-9].*`)
+		}
+		for _, cat := range []string{"total", "garbage", "transient", "overreplicated", "underreplicated", "unachievable", "balanced", "desired", "lost"} {
+			c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_`+cat+`_`+sub+` [0-9].*`)
+		}
+	}
+	c.Logf("%s", metrics)
 }
 
-func (s *runSuite) TestRunForever(c *check.C) {
-	s.config.ManagementToken = "xyzzy"
+func (s *runSuite) TestChunkPrefix(c *check.C) {
+	s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
 	opts := RunOptions{
-		CommitPulls: true,
-		CommitTrash: true,
+		ChunkPrefix: "ac", // catch "foo" but not "bar"
 		Logger:      ctxlog.TestLogger(c),
 		Dumper:      ctxlog.TestLogger(c),
 	}
@@ -532,46 +643,124 @@
 	s.stub.serveKeepstoreIndexFoo4Bar1()
 	trashReqs := s.stub.serveKeepstoreTrash()
 	pullReqs := s.stub.serveKeepstorePull()
+	srv := s.newServer(&opts)
+	bal, err := srv.runOnce(context.Background())
+	c.Check(err, check.IsNil)
+	c.Check(trashReqs.Count(), check.Equals, 8)
+	c.Check(pullReqs.Count(), check.Equals, 4)
+	// "foo" block is overreplicated by 2
+	c.Check(bal.stats.trashes, check.Equals, 2)
+	// "bar" block is underreplicated but does not match prefix
+	c.Check(bal.stats.pulls, check.Equals, 0)
+
+	lost, err := ioutil.ReadFile(s.config.Collections.BlobMissingReport)
+	c.Assert(err, check.IsNil)
+	c.Check(string(lost), check.Equals, "")
+}
 
-	stop := make(chan interface{})
-	s.config.Collections.BalancePeriod = arvados.Duration(time.Millisecond)
+func (s *runSuite) TestRunForever_TriggeredByTimer(c *check.C) {
+	s.config.ManagementToken = "xyzzy"
+	opts := RunOptions{
+		Logger: ctxlog.TestLogger(c),
+		Dumper: ctxlog.TestLogger(c),
+	}
+	s.stub.serveCurrentUserAdmin()
+	s.stub.serveFooBarFileCollections()
+	s.stub.serveKeepServices(stubServices)
+	s.stub.serveKeepstoreMounts()
+	s.stub.serveKeepstoreIndexFoo4Bar1()
+	trashReqs := s.stub.serveKeepstoreTrash()
+	pullReqs := s.stub.serveKeepstorePull()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	s.config.Collections.BalancePeriod = arvados.Duration(10 * time.Millisecond)
 	srv := s.newServer(&opts)
 	done := make(chan bool)
 	go func() {
-		srv.runForever(stop)
+		srv.runForever(ctx)
 		close(done)
 	}()
 
 	// Each run should send 4 pull lists + 4 trash lists. The
 	// first run should also send 4 empty trash lists at
-	// startup. We should complete all four runs in much less than
-	// a second.
-	for t0 := time.Now(); pullReqs.Count() < 16 && time.Since(t0) < 10*time.Second; {
+	// startup. We should complete at least four runs in much less
+	// than 10s.
+	for t0 := time.Now(); time.Since(t0) < 10*time.Second; {
+		pulls := pullReqs.Count()
+		if pulls >= 16 && trashReqs.Count() == pulls+4 {
+			break
+		}
 		time.Sleep(time.Millisecond)
 	}
-	stop <- true
+	cancel()
 	<-done
 	c.Check(pullReqs.Count() >= 16, check.Equals, true)
-	c.Check(trashReqs.Count(), check.Equals, pullReqs.Count()+4)
+	c.Check(trashReqs.Count() >= 20, check.Equals, true)
 
-	buf, err := s.getMetrics(c, srv)
-	c.Check(err, check.IsNil)
-	c.Check(buf, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count `+fmt.Sprintf("%d", pullReqs.Count()/4)+`\n.*`)
+	// We should have completed 4 runs before calling cancel().
+	// But the next run might also have started before we called
+	// cancel(), in which case the extra run will be included in
+	// the changeset_compute_seconds_count metric.
+	completed := pullReqs.Count() / 4
+	metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+	c.Check(metrics, check.Matches, fmt.Sprintf(`(?ms).*\narvados_keepbalance_changeset_compute_seconds_count (%d|%d)\n.*`, completed, completed+1))
 }
 
-func (s *runSuite) getMetrics(c *check.C, srv *Server) (*bytes.Buffer, error) {
-	mfs, err := srv.Metrics.reg.Gather()
-	if err != nil {
-		return nil, err
+func (s *runSuite) TestRunForever_TriggeredBySignal(c *check.C) {
+	s.config.ManagementToken = "xyzzy"
+	opts := RunOptions{
+		Logger: ctxlog.TestLogger(c),
+		Dumper: ctxlog.TestLogger(c),
 	}
+	s.stub.serveCurrentUserAdmin()
+	s.stub.serveFooBarFileCollections()
+	s.stub.serveKeepServices(stubServices)
+	s.stub.serveKeepstoreMounts()
+	s.stub.serveKeepstoreIndexFoo4Bar1()
+	trashReqs := s.stub.serveKeepstoreTrash()
+	pullReqs := s.stub.serveKeepstorePull()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	s.config.Collections.BalancePeriod = arvados.Duration(time.Minute)
 	srv := s.newServer(&opts)
-	var buf bytes.Buffer
-	for _, mf := range mfs {
-		if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
-			return nil, err
+	done := make(chan bool)
+	go func() {
+		srv.runForever(ctx)
+		close(done)
+	}()
+
+	procself, err := os.FindProcess(os.Getpid())
+	c.Assert(err, check.IsNil)
+
+	// Each run should send 4 pull lists + 4 trash lists. The
+	// first run should also send 4 empty trash lists at
+	// startup. We should be able to complete four runs in much
+	// less than 10s.
+	completedRuns := 0
+	for t0 := time.Now(); time.Since(t0) < 10*time.Second; {
+		pulls := pullReqs.Count()
+		if pulls >= 16 && trashReqs.Count() == pulls+4 {
+			break
 		}
+		// Once the 1st run has started automatically, we
+		// start sending a single SIGUSR1 at the end of each
+		// run, to ensure we get exactly 4 runs in total.
+		if pulls > 0 && pulls%4 == 0 && pulls <= 12 && pulls/4 > completedRuns {
+			completedRuns = pulls / 4
+			c.Logf("completed run %d, sending SIGUSR1 to trigger next run", completedRuns)
+			procself.Signal(syscall.SIGUSR1)
+		}
+		time.Sleep(time.Millisecond)
 	}
+	cancel()
+	<-done
+	c.Check(pullReqs.Count(), check.Equals, 16)
+	c.Check(trashReqs.Count(), check.Equals, 20)
 
-	return &buf, nil
+	metrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)
+	c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_count 4\n.*`)
 }
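
The three trials in TestInvalidChunkPrefix above pin down the validation contract for RunOptions.ChunkPrefix: only lowercase hex digits are accepted, and a prefix longer than a 32-digit Keep block hash is rejected. The following is a minimal sketch of a validator consistent with those error messages; the function name validateChunkPrefix is hypothetical, and the real check lives in keep-balance code outside this diff.

package main

import "fmt"

// validateChunkPrefix sketches the checks TestInvalidChunkPrefix
// expects: a Keep block hash is 32 lowercase hex digits, so a usable
// prefix must be hex and no longer than that. (Illustrative only.)
func validateChunkPrefix(prefix string) error {
	if len(prefix) > 32 {
		return fmt.Errorf("invalid chunk prefix %q: longer than a block hash", prefix)
	}
	for _, c := range prefix {
		if (c < '0' || c > '9') && (c < 'a' || c > 'f') {
			return fmt.Errorf("invalid char %q in chunk prefix %q", string(c), prefix)
		}
	}
	return nil
}

func main() {
	for _, p := range []string{"ac", "123ABC", "123xyz"} {
		fmt.Println(p, "->", validateChunkPrefix(p))
	}
}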
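
The tests also swap the suite's getMetrics helper (deleted at the bottom of the diff, along with the bytes and expfmt imports) for arvadostest.GatherMetricsAsString. Assuming that helper does essentially what the deleted code did, a plausible implementation looks like the sketch below; the real arvadostest helper may differ in details such as error handling.

package main

import (
	"bytes"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

// gatherMetricsAsString renders every metric family in a registry in
// the Prometheus text exposition format, so tests can match lines
// like "arvados_keep_total_bytes 15" with regexps.
func gatherMetricsAsString(reg *prometheus.Registry) string {
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	for _, mf := range mfs {
		if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
			panic(err)
		}
	}
	return buf.String()
}

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "arvados_keep_total_bytes", Help: "example"})
	reg.MustRegister(c)
	c.Add(15)
	fmt.Print(gatherMetricsAsString(reg))
}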
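
TestRunForever_TriggeredBySignal drives runs by sending itself SIGUSR1 while BalancePeriod is a full minute, so runs 2 through 4 can only have been triggered by the signal handler, while TestRunForever_TriggeredByTimer relies on a 10ms period instead. A rough sketch of the loop shape that behavior implies, with all names and parameters assumed (the real runForever lives in the keep-balance service code, not in this test file):

package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// runForever sketches a balance loop that runs once at startup, then
// again on every timer tick or SIGUSR1, until the context is canceled.
func runForever(ctx context.Context, period time.Duration, runOnce func()) {
	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	ticker := time.NewTicker(period)
	defer ticker.Stop()

	runOnce() // initial run at startup
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// periodic run (cf. TestRunForever_TriggeredByTimer)
		case <-sigusr1:
			// extra run on demand (cf. TestRunForever_TriggeredBySignal)
		}
		runOnce()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go runForever(ctx, time.Second, func() { println("balance pass") })
	time.Sleep(3500 * time.Millisecond)
	cancel()
}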