import (
"bytes"
+ "context"
"crypto/md5"
"fmt"
"io"
"syscall"
"time"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
)
defer bal.time("sweep", "wall clock time to run one full sweep")()
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(cluster.Collections.BalanceTimeout.Duration()))
+ defer cancel()
+
var lbFile *os.File
if bal.LostBlocksFile != "" {
tmpfn := bal.LostBlocksFile + ".tmp"
if err = bal.CheckSanityEarly(client); err != nil {
return
}
+
+ // On a big site, indexing and sending trash/pull lists can
+ // take much longer than the usual 5-minute client
+ // timeout. From here on, we rely on the context deadline
+ // instead, aborting the entire operation if any part takes
+ // too long.
+ client.Timeout = 0
+
rs := bal.rendezvousState()
if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
if runOptions.SafeRendezvousState != "" {
bal.logf("notice: KeepServices list has changed since last run")
}
bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
- if err = bal.ClearTrashLists(client); err != nil {
+ if err = bal.ClearTrashLists(ctx, client); err != nil {
return
}
// The current rendezvous state becomes "safe" (i.e.,
// succeed in clearing existing trash lists.
nextRunOptions.SafeRendezvousState = rs
}
- if err = bal.GetCurrentState(client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
+
+ if err = bal.GetCurrentState(ctx, client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
return
}
bal.ComputeChangeSets()
lbFile = nil
}
if runOptions.CommitPulls {
- err = bal.CommitPulls(client)
+ err = bal.CommitPulls(ctx, client)
if err != nil {
// Skip trash if we can't pull. (Too cautious?)
return
}
}
if runOptions.CommitTrash {
- err = bal.CommitTrash(client)
+ err = bal.CommitTrash(ctx, client)
}
return
}
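The comment about the client timeout describes a general pattern: disable the HTTP client's own per-request timeout and bound the whole multi-request operation with a single context deadline instead. Below is a minimal, self-contained sketch of that pattern using only the standard library; the URL and the deadline value are illustrative and not taken from this code or the Arvados configuration.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// No per-request cap on the client; each request is bounded only
	// by the context attached to it (analogous to client.Timeout = 0
	// in the diff above).
	client := &http.Client{Timeout: 0}

	// One deadline for the whole operation, analogous to
	// Collections.BalanceTimeout bounding a full sweep.
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", "http://keep0.example/index", nil)
	if err != nil {
		fmt.Println("build request:", err)
		return
	}
	resp, err := client.Do(req) // aborted once the deadline passes
	if err != nil {
		fmt.Println("request aborted or failed:", err)
		return
	}
	resp.Body.Close()
}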
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
-func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
+func (bal *Balancer) ClearTrashLists(ctx context.Context, c *arvados.Client) error {
for _, srv := range bal.KeepServices {
srv.ChangeSet = &ChangeSet{}
}
- return bal.CommitTrash(c)
+ return bal.CommitTrash(ctx, c)
}
// GetCurrentState determines the current replication state, and the
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
-func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
+func (bal *Balancer) GetCurrentState(ctx context.Context, c *arvados.Client, pageSize, bufs int) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
defer bal.time("get_state", "wall clock time to get current state")()
bal.BlockStateMap = NewBlockStateMap()
go func(mounts []*KeepMount) {
defer wg.Done()
bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
- idx, err := mounts[0].KeepService.IndexMount(c, mounts[0].UUID, "")
+ idx, err := mounts[0].KeepService.IndexMount(ctx, c, mounts[0].UUID, "")
if err != nil {
select {
case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
default:
}
+ cancel()
return
}
if len(errs) > 0 {
}
for range collQ {
}
+ cancel()
return
}
bal.collScanned++
wg.Add(1)
go func() {
defer wg.Done()
- err = EachCollection(c, pageSize,
+ err = EachCollection(ctx, c, pageSize,
func(coll arvados.Collection) error {
collQ <- coll
if len(errs) > 0 {
case errs <- err:
default:
}
+ cancel()
}
}()
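The goroutines above share one error-handling idiom: a non-blocking send into a buffered errs channel keeps only the first error, and cancel() tells the remaining workers to stop early. The sketch below is a self-contained illustration of that idiom; the worker bodies and names are placeholders, not the real index and collection scanners.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	errs := make(chan error, 1) // holds only the first reported error
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			select {
			case <-ctx.Done():
				return // another worker already failed; stop early
			default:
			}
			if i == 2 { // pretend this worker hits an error
				select {
				case errs <- errors.New("worker 2 failed"):
				default: // an error is already recorded; drop this one
				}
				cancel() // make the other workers bail out
			}
		}(i)
	}
	wg.Wait()
	close(errs)
	fmt.Println(<-errs) // prints: worker 2 failed
}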
changeNone: "none",
}
+type balancedBlockState struct {
+ needed int
+ unneeded int
+ pulling int
+ unachievable bool
+}
+
type balanceResult struct {
blk *BlockState
blkid arvados.SizedDigest
- have int
- want int
+ lost bool
+ blockState balancedBlockState
classState map[string]balancedBlockState
}
+type slot struct {
+ mnt *KeepMount // never nil
+ repl *Replica // replica already stored here (or nil)
+ want bool // we should pull/leave a replica here
+}
+
// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
bal.Logger.Debugf("balanceBlock: %v %+v", blkid, blk)
- type slot struct {
- mnt *KeepMount // never nil
- repl *Replica // replica already stored here (or nil)
- want bool // we should pull/leave a replica here
- }
-
// Build a list of all slots (one per mounted volume).
slots := make([]slot, 0, bal.mounts)
for _, srv := range bal.KeepServices {
// won't want to trash any replicas.
underreplicated := false
- classState := make(map[string]balancedBlockState, len(bal.classes))
unsafeToDelete := make(map[int64]bool, len(slots))
for _, class := range bal.classes {
desired := blk.Desired[class]
-
- countedDev := map[string]bool{}
- have := 0
- for _, slot := range slots {
- if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
- have += slot.mnt.Replication
- if slot.mnt.DeviceID != "" {
- countedDev[slot.mnt.DeviceID] = true
- }
- }
- }
- classState[class] = balancedBlockState{
- desired: desired,
- surplus: have - desired,
- }
-
if desired == 0 {
continue
}
underreplicated = safe < desired
}
- // set the unachievable flag if there aren't enough
- // slots offering the relevant storage class. (This is
- // as easy as checking slots[desired] because we
- // already sorted the qualifying slots to the front.)
- if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
- cs := classState[class]
- cs.unachievable = true
- classState[class] = cs
- }
-
// Avoid deleting wanted replicas from devices that
// are mounted on multiple servers -- even if they
// haven't already been added to unsafeToDelete
// replica that doesn't have a timestamp collision with
// others.
- countedDev := map[string]bool{}
- var have, want int
- for _, slot := range slots {
- if countedDev[slot.mnt.DeviceID] {
- continue
- }
- if slot.want {
- want += slot.mnt.Replication
- }
- if slot.repl != nil {
- have += slot.mnt.Replication
- }
- if slot.mnt.DeviceID != "" {
- countedDev[slot.mnt.DeviceID] = true
+ for i, slot := range slots {
+ // Don't trash (1) any replicas of an underreplicated
+ // block, even if they're in the wrong positions, or
+ // (2) any replicas whose Mtimes are identical to
+ // needed replicas (in case we're really seeing the
+ // same copy via different mounts).
+ if slot.repl != nil && (underreplicated || unsafeToDelete[slot.repl.Mtime]) {
+ slots[i].want = true
}
}
+ classState := make(map[string]balancedBlockState, len(bal.classes))
+ for _, class := range bal.classes {
+ classState[class] = computeBlockState(slots, bal.mountsByClass[class], len(blk.Replicas), blk.Desired[class])
+ }
+ blockState := computeBlockState(slots, nil, len(blk.Replicas), 0)
+
+ var lost bool
var changes []string
for _, slot := range slots {
// TODO: request a Touch if Mtime is duplicated.
var change int
switch {
- case !underreplicated && !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime && !unsafeToDelete[slot.repl.Mtime]:
+ case !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime:
slot.mnt.KeepService.AddTrash(Trash{
SizedDigest: blkid,
Mtime: slot.repl.Mtime,
From: slot.mnt,
})
change = changeTrash
- case len(blk.Replicas) > 0 && slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+ case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
+ lost = true
+ change = changeNone
+ case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
slot.mnt.KeepService.AddPull(Pull{
SizedDigest: blkid,
From: blk.Replicas[0].KeepMount.KeepService,
}
}
if bal.Dumper != nil {
- bal.Dumper.Printf("%s refs=%d have=%d want=%v %v %v", blkid, blk.RefCount, have, want, blk.Desired, changes)
+ bal.Dumper.Printf("%s refs=%d needed=%d unneeded=%d pulling=%v %v %v", blkid, blk.RefCount, blockState.needed, blockState.unneeded, blockState.pulling, blk.Desired, changes)
}
return balanceResult{
blk: blk,
blkid: blkid,
- have: have,
- want: want,
+ lost: lost,
+ blockState: blockState,
classState: classState,
}
}
+func computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRepl int) (bbs balancedBlockState) {
+ repl := 0
+ countedDev := map[string]bool{}
+ for _, slot := range slots {
+ if onlyCount != nil && !onlyCount[slot.mnt] {
+ continue
+ }
+ if countedDev[slot.mnt.DeviceID] {
+ continue
+ }
+ switch {
+ case slot.repl != nil && slot.want:
+ bbs.needed++
+ repl += slot.mnt.Replication
+ case slot.repl != nil && !slot.want:
+ bbs.unneeded++
+ repl += slot.mnt.Replication
+ case slot.repl == nil && slot.want && have > 0:
+ bbs.pulling++
+ repl += slot.mnt.Replication
+ }
+ if slot.mnt.DeviceID != "" {
+ countedDev[slot.mnt.DeviceID] = true
+ }
+ }
+ if repl < needRepl {
+ bbs.unachievable = true
+ }
+ return
+}
+
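To make the counting rules in computeBlockState concrete, here is a self-contained toy version of the same classification. The types are simplified stand-ins for the real slot/KeepMount types: a stored replica we want to keep is "needed", a stored replica we plan to trash is "unneeded", an empty wanted slot is "pulling" (counted only while the block exists somewhere), slots sharing a DeviceID are counted once, and the result is "unachievable" when the counted replication falls short of the target.

package main

import "fmt"

type mount struct {
	DeviceID    string
	Replication int
}

type toySlot struct {
	mnt  mount
	have bool // a replica is already stored on this mount
	want bool // we want a replica on this mount
}

func classify(slots []toySlot, haveAny bool, needRepl int) (needed, unneeded, pulling int, unachievable bool) {
	repl := 0
	countedDev := map[string]bool{}
	for _, s := range slots {
		if s.mnt.DeviceID != "" && countedDev[s.mnt.DeviceID] {
			continue // same backing device as an earlier slot: count once
		}
		switch {
		case s.have && s.want:
			needed++
			repl += s.mnt.Replication
		case s.have && !s.want:
			unneeded++
			repl += s.mnt.Replication
		case !s.have && s.want && haveAny:
			pulling++
			repl += s.mnt.Replication
		}
		if s.mnt.DeviceID != "" {
			countedDev[s.mnt.DeviceID] = true
		}
	}
	return needed, unneeded, pulling, repl < needRepl
}

func main() {
	slots := []toySlot{
		{mount{"devA", 1}, true, true},  // kept replica -> needed
		{mount{"devA", 1}, true, false}, // same device -> not counted again
		{mount{"devB", 1}, true, false}, // replica slated for trash -> unneeded
		{mount{"devC", 1}, false, true}, // empty wanted slot -> pulling
	}
	fmt.Println(classify(slots, true, 2)) // prints: 1 1 1 false
}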
type blocksNBytes struct {
replicas int
blocks int
return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}
+type replicationStats struct {
+ needed blocksNBytes
+ unneeded blocksNBytes
+ pulling blocksNBytes
+ unachievable blocksNBytes
+}
+
type balancerStats struct {
lost blocksNBytes
overrep blocksNBytes
return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
}
-type replicationStats struct {
- desired blocksNBytes
- surplus blocksNBytes
- short blocksNBytes
- unachievable blocksNBytes
-}
-
-type balancedBlockState struct {
- desired int
- surplus int
- unachievable bool
-}
-
func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
var s balancerStats
s.replHistogram = make([]int, 2)
s.classStats = make(map[string]replicationStats, len(bal.classes))
for result := range results {
- surplus := result.have - result.want
bytes := result.blkid.Size()
if rc := int64(result.blk.RefCount); rc > 0 {
for class, state := range result.classState {
cs := s.classStats[class]
if state.unachievable {
+ cs.unachievable.replicas++
cs.unachievable.blocks++
cs.unachievable.bytes += bytes
}
- if state.desired > 0 {
- cs.desired.replicas += state.desired
- cs.desired.blocks++
- cs.desired.bytes += bytes * int64(state.desired)
+ if state.needed > 0 {
+ cs.needed.replicas += state.needed
+ cs.needed.blocks++
+ cs.needed.bytes += bytes * int64(state.needed)
}
- if state.surplus > 0 {
- cs.surplus.replicas += state.surplus
- cs.surplus.blocks++
- cs.surplus.bytes += bytes * int64(state.surplus)
- } else if state.surplus < 0 {
- cs.short.replicas += -state.surplus
- cs.short.blocks++
- cs.short.bytes += bytes * int64(-state.surplus)
+ if state.unneeded > 0 {
+ cs.unneeded.replicas += state.unneeded
+ cs.unneeded.blocks++
+ cs.unneeded.bytes += bytes * int64(state.unneeded)
+ }
+ if state.pulling > 0 {
+ cs.pulling.replicas += state.pulling
+ cs.pulling.blocks++
+ cs.pulling.bytes += bytes * int64(state.pulling)
}
s.classStats[class] = cs
}
+ bs := result.blockState
switch {
- case result.have == 0 && result.want > 0:
- s.lost.replicas -= surplus
+ case result.lost:
+ s.lost.replicas++
s.lost.blocks++
- s.lost.bytes += bytes * int64(-surplus)
+ s.lost.bytes += bytes
fmt.Fprintf(bal.lostBlocks, "%s", strings.SplitN(string(result.blkid), "+", 2)[0])
for pdh := range result.blk.Refs {
fmt.Fprintf(bal.lostBlocks, " %s", pdh)
}
fmt.Fprint(bal.lostBlocks, "\n")
- case surplus < 0:
- s.underrep.replicas -= surplus
+ case bs.pulling > 0:
+ s.underrep.replicas += bs.pulling
+ s.underrep.blocks++
+ s.underrep.bytes += bytes * int64(bs.pulling)
+ case bs.unachievable:
+ s.underrep.replicas++
s.underrep.blocks++
- s.underrep.bytes += bytes * int64(-surplus)
- case surplus > 0 && result.want == 0:
+ s.underrep.bytes += bytes
+ case bs.unneeded > 0 && bs.needed == 0:
+ // Count as "garbage" if all replicas are old
+ // enough to trash, otherwise count as
+ // "unref".
counter := &s.garbage
for _, r := range result.blk.Replicas {
if r.Mtime >= bal.MinMtime {
break
}
}
- counter.replicas += surplus
+ counter.replicas += bs.unneeded
counter.blocks++
- counter.bytes += bytes * int64(surplus)
- case surplus > 0:
- s.overrep.replicas += surplus
+ counter.bytes += bytes * int64(bs.unneeded)
+ case bs.unneeded > 0:
+ s.overrep.replicas += bs.unneeded
s.overrep.blocks++
- s.overrep.bytes += bytes * int64(result.have-result.want)
+ s.overrep.bytes += bytes * int64(bs.unneeded)
default:
- s.justright.replicas += result.want
+ s.justright.replicas += bs.needed
s.justright.blocks++
- s.justright.bytes += bytes * int64(result.want)
+ s.justright.bytes += bytes * int64(bs.needed)
}
- if result.want > 0 {
- s.desired.replicas += result.want
+ if bs.needed > 0 {
+ s.desired.replicas += bs.needed
s.desired.blocks++
- s.desired.bytes += bytes * int64(result.want)
+ s.desired.bytes += bytes * int64(bs.needed)
}
- if result.have > 0 {
- s.current.replicas += result.have
+ if bs.needed+bs.unneeded > 0 {
+ s.current.replicas += bs.needed + bs.unneeded
s.current.blocks++
- s.current.bytes += bytes * int64(result.have)
+ s.current.bytes += bytes * int64(bs.needed+bs.unneeded)
}
- for len(s.replHistogram) <= result.have {
+ for len(s.replHistogram) <= bs.needed+bs.unneeded {
s.replHistogram = append(s.replHistogram, 0)
}
- s.replHistogram[result.have]++
+ s.replHistogram[bs.needed+bs.unneeded]++
}
for _, srv := range bal.KeepServices {
s.pulls += len(srv.ChangeSet.Pulls)
for _, class := range bal.classes {
cs := bal.stats.classStats[class]
bal.logf("===")
- bal.logf("storage class %q: %s desired", class, cs.desired)
- bal.logf("storage class %q: %s short", class, cs.short)
- bal.logf("storage class %q: %s surplus", class, cs.surplus)
+ bal.logf("storage class %q: %s needed", class, cs.needed)
+ bal.logf("storage class %q: %s unneeded", class, cs.unneeded)
+ bal.logf("storage class %q: %s pulling", class, cs.pulling)
bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
}
bal.logf("===")
}
func (bal *Balancer) printHistogram(hashColumns int) {
- bal.logf("Replication level distribution (counting N replicas on a single server as N):")
+ bal.logf("Replication level distribution:")
maxCount := 0
for _, count := range bal.stats.replHistogram {
if maxCount < count {
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
-func (bal *Balancer) CommitPulls(c *arvados.Client) error {
+func (bal *Balancer) CommitPulls(ctx context.Context, c *arvados.Client) error {
defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
return bal.commitAsync(c, "send pull list",
func(srv *KeepService) error {
- return srv.CommitPulls(c)
+ return srv.CommitPulls(ctx, c)
})
}
// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
-func (bal *Balancer) CommitTrash(c *arvados.Client) error {
+func (bal *Balancer) CommitTrash(ctx context.Context, c *arvados.Client) error {
defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
return bal.commitAsync(c, "send trash list",
func(srv *KeepService) error {
- return srv.CommitTrash(c)
+ return srv.CommitTrash(ctx, c)
})
}
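commitAsync itself is not part of this diff; the sketch below is only an illustration, with invented names, of the fan-out shape these callers rely on: one goroutine per keepstore server, every per-server commit sharing the sweep's context so the whole send aborts together when the deadline passes.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// commitAll runs one commit per server concurrently and returns the
// first error reported, if any.
func commitAll(ctx context.Context, servers []string, commit func(context.Context, string) error) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(servers))
	for _, srv := range servers {
		wg.Add(1)
		go func(srv string) {
			defer wg.Done()
			if err := commit(ctx, srv); err != nil {
				errs <- fmt.Errorf("%s: %v", srv, err)
			}
		}(srv)
	}
	wg.Wait()
	close(errs)
	return <-errs // nil unless at least one server failed
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := commitAll(ctx, []string{"keep0", "keep1"}, func(ctx context.Context, srv string) error {
		select {
		case <-time.After(100 * time.Millisecond): // pretend to send a trash/pull list
			return nil
		case <-ctx.Done():
			return ctx.Err() // sweep deadline reached; give up
		}
	})
	fmt.Println("commit result:", err)
}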