# or omitted, pages are processed serially.
BalanceCollectionBuffers: 1000
+ # Maximum time for a rebalancing run. This ensures keep-balance
+ # eventually gives up and retries if, for example, a network
+ # error causes a hung connection that is never closed by the
+ # OS. It should be long enough that it doesn't interrupt a
+ # long-running balancing operation.
+ BalanceTimeout: 6h
+
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than BlobSigningTTL.
DefaultTrashLifetime: 336h
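
As a usage sketch, a site whose full sweeps legitimately run longer than the 6h default could raise the limit in its cluster config (the cluster ID "xxxxx" below is a placeholder, not part of this change):

    Clusters:
      xxxxx:
        Collections:
          BalanceTimeout: 24h
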
"Collections.WebDAVCache": false,
"Collections.BalanceCollectionBatch": false,
"Collections.BalancePeriod": false,
+ "Collections.BalanceTimeout": false,
"Collections.BlobMissingReport": false,
"Collections.BalanceCollectionBuffers": false,
"Containers": true,
# or omitted, pages are processed serially.
BalanceCollectionBuffers: 1000
+ # Maximum time for a rebalancing run. This ensures keep-balance
+ # eventually gives up and retries if, for example, a network
+ # error causes a hung connection that is never closed by the
+ # OS. It should be long enough that it doesn't interrupt a
+ # long-running balancing operation.
+ BalanceTimeout: 6h
+
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than BlobSigningTTL.
DefaultTrashLifetime: 336h
cluster.TLS.Insecure = true
cluster.API.MaxItemsPerResponse = 1000
cluster.API.MaxRequestAmplification = 4
+ cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
s.testHandler = &Handler{Cluster: cluster}
PostgreSQL: integrationTestCluster().PostgreSQL,
ForceLegacyAPI14: forceLegacyAPI14,
}
+ s.cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
s.cluster.TLS.Insecure = true
arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
arvadostest.SetServiceURL(&s.cluster.Services.Controller, "http://localhost:/")
Header: hdrOut,
Body: reqIn.Body,
}).WithContext(reqIn.Context())
-
- resp, err := client.Do(reqOut)
- return resp, err
+ return client.Do(reqOut)
}
// Copy a response (or error) to the downstream client
BalancePeriod Duration
BalanceCollectionBatch int
BalanceCollectionBuffers int
+ BalanceTimeout Duration
WebDAVCache WebDAVCacheConfig
}
defer bal.time("sweep", "wall clock time to run one full sweep")()
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(cluster.Collections.BalanceTimeout.Duration()))
+ defer cancel()
+
var lbFile *os.File
if bal.LostBlocksFile != "" {
tmpfn := bal.LostBlocksFile + ".tmp"
if err = bal.CheckSanityEarly(client); err != nil {
return
}
+
+ // On a big site, indexing and sending trash/pull lists can
+	// take much longer than the usual 5-minute client
+ // timeout. From here on, we rely on the context deadline
+ // instead, aborting the entire operation if any part takes
+ // too long.
+ client.Timeout = 0
+
rs := bal.rendezvousState()
if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
if runOptions.SafeRendezvousState != "" {
bal.logf("notice: KeepServices list has changed since last run")
}
bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
- if err = bal.ClearTrashLists(client); err != nil {
+ if err = bal.ClearTrashLists(ctx, client); err != nil {
return
}
// The current rendezvous state becomes "safe" (i.e.,
nextRunOptions.SafeRendezvousState = rs
}
- // Indexing and sending trash/pull lists can take a long time
- // on a big site. Prefer a long timeout (causing slow recovery
- // from undetected network problems) to a short timeout
- // (causing starvation via perpetual timeout/restart cycle).
- client.Timeout = 24 * time.Hour
-
- if err = bal.GetCurrentState(client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
+ if err = bal.GetCurrentState(ctx, client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
return
}
bal.ComputeChangeSets()
lbFile = nil
}
if runOptions.CommitPulls {
- err = bal.CommitPulls(client)
+ err = bal.CommitPulls(ctx, client)
if err != nil {
// Skip trash if we can't pull. (Too cautious?)
return
}
}
if runOptions.CommitTrash {
- err = bal.CommitTrash(client)
+ err = bal.CommitTrash(ctx, client)
}
return
}
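
For context, the "gives up and retries" behavior described in the config comment comes from the service's outer loop rather than from this function. A rough sketch of that relationship, assuming Run keeps its usual (client, cluster, runOptions) signature and reusing the existing BalancePeriod setting; the loop and the newBalancer helper are illustrative stand-ins, not part of this diff:

    // Illustrative driver loop (assumed shape, not part of this change):
    // newBalancer is a stand-in for however the caller constructs a
    // *Balancer. Each sweep is bounded by the BalanceTimeout deadline set
    // inside Run, so a sweep hung on a dead connection returns an error
    // and is retried on the next BalancePeriod tick instead of blocking
    // forever.
    for {
        var err error
        runOptions, err = newBalancer().Run(client, cluster, runOptions)
        if err != nil {
            log.Printf("balance run failed: %s", err)
        }
        time.Sleep(cluster.Collections.BalancePeriod.Duration())
    }
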
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
-func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
+func (bal *Balancer) ClearTrashLists(ctx context.Context, c *arvados.Client) error {
for _, srv := range bal.KeepServices {
srv.ChangeSet = &ChangeSet{}
}
- return bal.CommitTrash(c)
+ return bal.CommitTrash(ctx, c)
}
// GetCurrentState determines the current replication state, and the
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
-func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
- ctx, cancel := context.WithCancel(context.Background())
+func (bal *Balancer) GetCurrentState(ctx context.Context, c *arvados.Client, pageSize, bufs int) error {
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
defer bal.time("get_state", "wall clock time to get current state")()
wg.Add(1)
go func() {
defer wg.Done()
- err = EachCollection(c, pageSize,
+ err = EachCollection(ctx, c, pageSize,
func(coll arvados.Collection) error {
collQ <- coll
if len(errs) > 0 {
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
-func (bal *Balancer) CommitPulls(c *arvados.Client) error {
+func (bal *Balancer) CommitPulls(ctx context.Context, c *arvados.Client) error {
defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
return bal.commitAsync(c, "send pull list",
func(srv *KeepService) error {
- return srv.CommitPulls(c)
+ return srv.CommitPulls(ctx, c)
})
}
// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
-func (bal *Balancer) CommitTrash(c *arvados.Client) error {
+func (bal *Balancer) CommitTrash(ctx context.Context, c *arvados.Client) error {
defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
return bal.commitAsync(c, "send trash list",
func(srv *KeepService) error {
- return srv.CommitTrash(c)
+ return srv.CommitTrash(ctx, c)
})
}
package main
import (
+ "context"
"fmt"
"time"
//
// If pageSize > 0 it is used as the maximum page size in each API
// call; otherwise the maximum allowed page size is requested.
-func EachCollection(c *arvados.Client, pageSize int, f func(arvados.Collection) error, progress func(done, total int)) error {
+func EachCollection(ctx context.Context, c *arvados.Client, pageSize int, f func(arvados.Collection) error, progress func(done, total int)) error {
if progress == nil {
progress = func(_, _ int) {}
}
for {
progress(callCount, expectCount)
var page arvados.CollectionList
- err := c.RequestAndDecode(&page, "GET", "arvados/v1/collections", nil, params)
+ err := c.RequestAndDecodeContext(ctx, &page, "GET", "arvados/v1/collections", nil, params)
if err != nil {
return err
}
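
As a usage sketch (hypothetical call site, with client being an *arvados.Client; not part of this change), the new context argument lets a deadline bound the whole paging loop:

    // Hypothetical caller: bound a full collection scan to one hour.
    // When the deadline passes, the in-flight RequestAndDecodeContext
    // call returns a context error and EachCollection stops paging.
    ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
    defer cancel()
    err := EachCollection(ctx, client, 0, func(coll arvados.Collection) error {
        return nil // examine coll here
    }, nil)
    if err != nil {
        log.Printf("collection scan failed: %s", err)
    }
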
package main
import (
+ "context"
"sync"
"time"
longestStreak := 0
var lastMod time.Time
sawUUID := make(map[string]bool)
- err := EachCollection(s.client, pageSize, func(c arvados.Collection) error {
+ err := EachCollection(context.Background(), s.client, pageSize, func(c arvados.Collection) error {
if c.ModifiedAt.IsZero() {
return nil
}
package main
import (
+ "context"
"encoding/json"
"fmt"
"io"
// CommitPulls sends the current list of pull requests to the storage
// server (even if the list is empty).
-func (srv *KeepService) CommitPulls(c *arvados.Client) error {
- return srv.put(c, "pull", srv.ChangeSet.Pulls)
+func (srv *KeepService) CommitPulls(ctx context.Context, c *arvados.Client) error {
+ return srv.put(ctx, c, "pull", srv.ChangeSet.Pulls)
}
// CommitTrash sends the current list of trash requests to the storage
// server (even if the list is empty).
-func (srv *KeepService) CommitTrash(c *arvados.Client) error {
- return srv.put(c, "trash", srv.ChangeSet.Trashes)
+func (srv *KeepService) CommitTrash(ctx context.Context, c *arvados.Client) error {
+ return srv.put(ctx, c, "trash", srv.ChangeSet.Trashes)
}
// Perform a PUT request at path, with data (as JSON) in the request
// body.
-func (srv *KeepService) put(c *arvados.Client, path string, data interface{}) error {
+func (srv *KeepService) put(ctx context.Context, c *arvados.Client, path string, data interface{}) error {
// We'll start a goroutine to do the JSON encoding, so we can
// stream it to the http client through a Pipe, rather than
// keeping the entire encoded version in memory.
}()
url := srv.URLBase() + "/" + path
- req, err := http.NewRequest("PUT", url, ioutil.NopCloser(jsonR))
+ req, err := http.NewRequestWithContext(ctx, "PUT", url, ioutil.NopCloser(jsonR))
if err != nil {
return fmt.Errorf("building request for %s: %v", url, err)
}
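
For reference, the pipe-based streaming described in the comment above follows this general shape (a minimal sketch of the pattern with illustrative names, not the exact keep-balance code):

    // Stream JSON through an io.Pipe: the encoder writes to pw in a
    // goroutine while the HTTP request body reads from pr, so the full
    // encoded document never has to be held in memory at once.
    func putJSON(ctx context.Context, url string, data interface{}) (*http.Response, error) {
        pr, pw := io.Pipe()
        go func() {
            // CloseWithError propagates an encoding error to the reader,
            // which makes the in-flight request fail instead of hanging.
            pw.CloseWithError(json.NewEncoder(pw).Encode(data))
        }()
        req, err := http.NewRequestWithContext(ctx, "PUT", url, pr)
        if err != nil {
            return nil, err
        }
        req.Header.Set("Content-Type", "application/json")
        return http.DefaultClient.Do(req)
    }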