// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"log"
	"math"
	"os"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)
// CheckConfig returns an error if anything is wrong with the given
// config and runOptions.
func CheckConfig(config Config, runOptions RunOptions) error {
	if len(config.KeepServiceList.Items) > 0 && config.KeepServiceTypes != nil {
		return fmt.Errorf("cannot specify both KeepServiceList and KeepServiceTypes in config")
	}
	if !runOptions.Once && config.RunPeriod == arvados.Duration(0) {
		return fmt.Errorf("you must either use the -once flag, or specify RunPeriod in config")
	}
	return nil
}
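
// For example (illustrative): with runOptions.Once == false and
// config.RunPeriod == 0, CheckConfig returns "you must either use
// the -once flag, or specify RunPeriod in config".
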
// Balancer compares the contents of keepstore servers with the
// collections stored in Arvados, and issues pull/trash requests
// needed to get (closer to) the optimal data layout.
//
// In the optimal data layout: every data block referenced by a
// collection is replicated at least as many times as desired by the
// collection; there are no unreferenced data blocks older than
// BlobSignatureTTL; and all N existing replicas of a given data block
// are in the N best positions in rendezvous probe order.
type Balancer struct {
	*BlockStateMap
	KeepServices       map[string]*KeepService
	DefaultReplication int
	Logger             *log.Logger
	Dumper             *log.Logger
	MinMtime           int64

	classes       []string
	mounts        int
	mountsByClass map[string]map[*KeepMount]bool
	collScanned   int
	serviceRoots  map[string]string
	errors        []error
	stats         balancerStats
	mutex         sync.Mutex
}

// Run performs a balance operation using the given config and
// runOptions, and returns RunOptions suitable for passing to a
// subsequent balance operation.
//
// Run should only be called once on a given Balancer object.
//
// Typical usage:
//
//	runOptions, err = (&Balancer{}).Run(config, runOptions)
func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
	nextRunOptions = runOptions

	bal.Dumper = runOptions.Dumper
	bal.Logger = runOptions.Logger
	if bal.Logger == nil {
		bal.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}

	defer timeMe(bal.Logger, "Run")()

	if len(config.KeepServiceList.Items) > 0 {
		err = bal.SetKeepServices(config.KeepServiceList)
	} else {
		err = bal.DiscoverKeepServices(&config.Client, config.KeepServiceTypes)
	}
	if err != nil {
		return
	}

	for _, srv := range bal.KeepServices {
		err = srv.discoverMounts(&config.Client)
		if err != nil {
			return
		}
	}
	bal.dedupDevices()

	if err = bal.CheckSanityEarly(&config.Client); err != nil {
		return
	}
	rs := bal.rendezvousState()
	if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
		if runOptions.SafeRendezvousState != "" {
			bal.logf("notice: KeepServices list has changed since last run")
		}
		bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
		if err = bal.ClearTrashLists(&config.Client); err != nil {
			return
		}
		// The current rendezvous state becomes "safe" (i.e.,
		// OK to compute changes for that state without
		// clearing existing trash lists) only now, after we
		// succeed in clearing existing trash lists.
		nextRunOptions.SafeRendezvousState = rs
	}
	if err = bal.GetCurrentState(&config.Client, config.CollectionBatchSize, config.CollectionBuffers); err != nil {
		return
	}
	bal.ComputeChangeSets()
	bal.PrintStatistics()
	if err = bal.CheckSanityLate(); err != nil {
		return
	}
	if runOptions.CommitPulls {
		err = bal.CommitPulls(&config.Client)
		if err != nil {
			// Skip trash if we can't pull. (Too cautious?)
			return
		}
	}
	if runOptions.CommitTrash {
		err = bal.CommitTrash(&config.Client)
	}
	return
}

// SetKeepServices sets the list of KeepServices to operate on.
func (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {
	bal.KeepServices = make(map[string]*KeepService)
	for _, srv := range srvList.Items {
		bal.KeepServices[srv.UUID] = &KeepService{
			KeepService: srv,
			ChangeSet:   &ChangeSet{},
		}
	}
	return nil
}
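
// For example (sketch; the UUID is made up), a caller with a static
// service list can skip API discovery entirely:
//
//	err := bal.SetKeepServices(arvados.KeepServiceList{
//		Items: []arvados.KeepService{{UUID: "zzzzz-bi6l4-0123456789abcde"}},
//	})
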
// DiscoverKeepServices sets the list of KeepServices by calling the
// API to get a list of all services, and selecting the ones whose
// ServiceType is in okTypes.
func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) error {
	bal.KeepServices = make(map[string]*KeepService)
	ok := make(map[string]bool)
	for _, t := range okTypes {
		ok[t] = true
	}
	return c.EachKeepService(func(srv arvados.KeepService) error {
		if ok[srv.ServiceType] {
			bal.KeepServices[srv.UUID] = &KeepService{
				KeepService: srv,
				ChangeSet:   &ChangeSet{},
			}
		} else {
			bal.logf("skipping %v with service type %q", srv.UUID, srv.ServiceType)
		}
		return nil
	})
}
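
// For example, to balance only services advertising the "disk"
// service type (proxy services, for instance, must never be
// balanced; see CheckSanityEarly):
//
//	err := bal.DiscoverKeepServices(&config.Client, []string{"disk"})
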
// dedupDevices removes redundant mounts from consideration: if a
// backend device is mounted read-write anywhere, read-only mounts of
// the same device are dropped.
func (bal *Balancer) dedupDevices() {
	rwdev := map[string]*KeepService{}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			if !mnt.ReadOnly && mnt.DeviceID != "" {
				rwdev[mnt.DeviceID] = srv
			}
		}
	}
	// Drop the readonly mounts whose device is mounted RW
	// elsewhere.
	for _, srv := range bal.KeepServices {
		var dedup []*KeepMount
		for _, mnt := range srv.mounts {
			if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
				bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
			} else {
				dedup = append(dedup, mnt)
			}
		}
		srv.mounts = dedup
	}
}

// CheckSanityEarly checks for configuration and runtime errors that
// can be detected before GetCurrentState() and ComputeChangeSets()
// are called.
//
// If it returns an error, it is pointless to run GetCurrentState or
// ComputeChangeSets: after doing so, the statistics would be
// meaningless and it would be dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
	u, err := c.CurrentUser()
	if err != nil {
		return fmt.Errorf("CurrentUser(): %v", err)
	}
	if !u.IsActive || !u.IsAdmin {
		return fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
	}
	for _, srv := range bal.KeepServices {
		if srv.ServiceType == "proxy" {
			return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
		}
	}
	return nil
}

// rendezvousState returns a fingerprint (e.g., a sorted list of
// UUID+host+port) of the current set of keep services.
func (bal *Balancer) rendezvousState() string {
	srvs := make([]string, 0, len(bal.KeepServices))
	for _, srv := range bal.KeepServices {
		srvs = append(srvs, srv.String())
	}
	sort.Strings(srvs)
	return strings.Join(srvs, "; ")
}

// ClearTrashLists sends an empty trash list to each keep
// service. Calling this before GetCurrentState avoids races.
//
// When a block appears in an index, we assume that replica will still
// exist after we delete other replicas on other servers. However,
// it's possible that a previous rebalancing operation made different
// decisions (e.g., servers were added/removed, and rendezvous order
// changed). In this case, the replica might already be on that
// server's trash list, and it might be deleted before we send a
// replacement trash list.
//
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
	for _, srv := range bal.KeepServices {
		srv.ChangeSet = &ChangeSet{}
	}
	return bal.CommitTrash(c)
}

// GetCurrentState determines the current replication state, and the
// desired replication level, for every block that is either
// retrievable or referenced.
//
// It determines the current replication state by reading the block index
// from every known Keep service.
//
// It determines the desired replication level by retrieving all
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
	defer timeMe(bal.Logger, "GetCurrentState")()
	bal.BlockStateMap = NewBlockStateMap()

	dd, err := c.DiscoveryDocument()
	if err != nil {
		return err
	}
	bal.DefaultReplication = dd.DefaultCollectionReplication
	bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9

	errs := make(chan error, 2+len(bal.KeepServices))
	wg := sync.WaitGroup{}

	// Start one goroutine for each KeepService: retrieve the
	// index, and add the returned blocks to BlockStateMap.
	for _, srv := range bal.KeepServices {
		wg.Add(1)
		go func(srv *KeepService) {
			defer wg.Done()
			bal.logf("%s: retrieve indexes", srv)
			for _, mount := range srv.mounts {
				bal.logf("%s: retrieve index", mount)
				idx, err := srv.IndexMount(c, mount.UUID, "")
				if err != nil {
					errs <- fmt.Errorf("%s: retrieve index: %v", mount, err)
					return
				}
				if len(errs) > 0 {
					// Some other goroutine encountered an
					// error -- any further effort here
					// would be wasted.
					return
				}
				bal.logf("%s: add %d replicas to map", mount, len(idx))
				bal.BlockStateMap.AddReplicas(mount, idx)
				bal.logf("%s: done", mount)
			}
			bal.logf("%s: done", srv)
		}(srv)
	}

	// collQ buffers incoming collections so we can start fetching
	// the next page without waiting for the current page to
	// finish processing.
	collQ := make(chan arvados.Collection, bufs)

	// Start a goroutine to process collections. (We could use a
	// worker pool here, but even with a single worker we already
	// process collections much faster than we can retrieve them.)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for coll := range collQ {
			err := bal.addCollection(coll)
			if err != nil {
				errs <- err
				for range collQ {
				}
				return
			}
			bal.collScanned++
		}
	}()

	// Start a goroutine to retrieve all collections from the
	// Arvados database and send them to collQ for processing.
	wg.Add(1)
	go func() {
		defer wg.Done()
		err = EachCollection(c, pageSize,
			func(coll arvados.Collection) error {
				collQ <- coll
				if len(errs) > 0 {
					// some other GetCurrentState
					// error happened: no point
					// getting any more
					// collections.
					return fmt.Errorf("")
				}
				return nil
			}, func(done, total int) {
				bal.logf("collections: %d/%d", done, total)
			})
		close(collQ)
		if err != nil {
			errs <- err
		}
	}()

	wg.Wait()
	if len(errs) > 0 {
		return <-errs
	}
	return nil
}

func (bal *Balancer) addCollection(coll arvados.Collection) error {
	blkids, err := coll.SizedDigests()
	if err != nil {
		bal.mutex.Lock()
		bal.errors = append(bal.errors, fmt.Errorf("%v: %v", coll.UUID, err))
		bal.mutex.Unlock()
		return nil
	}
	repl := bal.DefaultReplication
	if coll.ReplicationDesired != nil {
		repl = *coll.ReplicationDesired
	}
	debugf("%v: %d blocks x%d", coll.UUID, len(blkids), repl)
	bal.BlockStateMap.IncreaseDesired(coll.StorageClassesDesired, repl, blkids)
	return nil
}

// ComputeChangeSets compares, for each known block, the current and
// desired replication states. If it is possible to get closer to the
// desired state by copying or deleting blocks, it adds those changes
// to the relevant KeepServices' ChangeSets.
//
// It does not actually apply any of the computed changes.
func (bal *Balancer) ComputeChangeSets() {
	// This just calls balanceBlock() once for each block, using a
	// pool of worker goroutines.
	defer timeMe(bal.Logger, "ComputeChangeSets")()
	bal.setupLookupTables()

	type balanceTask struct {
		blkid arvados.SizedDigest
		blk   *BlockState
	}
	workers := runtime.GOMAXPROCS(-1)
	todo := make(chan balanceTask, workers)
	go func() {
		bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
			todo <- balanceTask{
				blkid: blkid,
				blk:   blk,
			}
		})
		close(todo)
	}()
	results := make(chan balanceResult, workers)
	go func() {
		var wg sync.WaitGroup
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for work := range todo {
					results <- bal.balanceBlock(work.blkid, work.blk)
				}
			}()
		}
		wg.Wait()
		close(results)
	}()
	bal.collectStatistics(results)
}

func (bal *Balancer) setupLookupTables() {
	bal.serviceRoots = make(map[string]string)
	bal.classes = []string{"default"}
	bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
	bal.mounts = 0
	for _, srv := range bal.KeepServices {
		bal.serviceRoots[srv.UUID] = srv.UUID
		for _, mnt := range srv.mounts {
			bal.mounts++

			// All mounts on a read-only service are
			// effectively read-only.
			mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly

			if len(mnt.StorageClasses) == 0 {
				bal.mountsByClass["default"][mnt] = true
				continue
			}
			for _, class := range mnt.StorageClasses {
				if mbc := bal.mountsByClass[class]; mbc == nil {
					bal.classes = append(bal.classes, class)
					bal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}
				} else {
					mbc[mnt] = true
				}
			}
		}
	}
	// Consider classes in lexicographic order to avoid flapping
	// between balancing runs. The outcome of the "prefer a mount
	// we're already planning to use for a different storage
	// class" case in balanceBlock depends on the order classes
	// are considered.
	sort.Strings(bal.classes)
}
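
// For illustration (hypothetical mounts): after setupLookupTables, a
// cluster with one unclassified mount mntA and one "archive" mount
// mntB would yield
//
//	bal.classes       == []string{"archive", "default"}
//	bal.mountsByClass == map[string]map[*KeepMount]bool{
//		"archive": {mntB: true},
//		"default": {mntA: true},
//	}
//
// i.e., mounts with no StorageClasses are treated as "default".
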
const (
	changeStay = iota
	changePull
	changeTrash
	changeNone
)

var changeName = map[int]string{
	changeStay:  "stay",
	changePull:  "pull",
	changeTrash: "trash",
	changeNone:  "none",
}

type balanceResult struct {
	blk        *BlockState
	blkid      arvados.SizedDigest
	have       int
	want       int
	classState map[string]balancedBlockState
}

// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
	debugf("balanceBlock: %v %+v", blkid, blk)

	type slot struct {
		mnt  *KeepMount // never nil
		repl *Replica   // replica already stored here (or nil)
		want bool       // we should pull/leave a replica here
	}

	// Build a list of all slots (one per mounted volume).
	slots := make([]slot, 0, bal.mounts)
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			var repl *Replica
			for r := range blk.Replicas {
				if blk.Replicas[r].KeepMount == mnt {
					repl = &blk.Replicas[r]
					break
				}
			}
			// Initial value of "want" is "have, and can't
			// delete". These untrashable replicas get
			// prioritized when sorting slots: otherwise,
			// non-optimal readonly copies would cause us
			// to overreplicate.
			slots = append(slots, slot{
				mnt:  mnt,
				repl: repl,
				want: repl != nil && (mnt.ReadOnly || repl.Mtime >= bal.MinMtime),
			})
		}
	}

	uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
	srvRendezvous := make(map[*KeepService]int, len(uuids))
	for i, uuid := range uuids {
		srv := bal.KeepServices[uuid]
		srvRendezvous[srv] = i
	}
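
	// For example (illustrative): if GetSortedRoots returns
	// [uuidB, uuidA, uuidC] for this block, then
	// srvRendezvous[srvB] == 0, and the sort below prefers srvB's
	// slots over srvA's and srvC's, all else being equal.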

	// Below we set underreplicated=true if we find any storage
	// class that's currently underreplicated -- in that case we
	// won't want to trash any replicas.
	underreplicated := false

	classState := make(map[string]balancedBlockState, len(bal.classes))
	unsafeToDelete := make(map[int64]bool, len(slots))
	for _, class := range bal.classes {
		desired := blk.Desired[class]

		countedDev := map[string]bool{}
		have := 0
		for _, slot := range slots {
			if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
				have++
				if slot.mnt.DeviceID != "" {
					countedDev[slot.mnt.DeviceID] = true
				}
			}
		}
		classState[class] = balancedBlockState{
			desired: desired,
			surplus: have - desired,
		}

		if desired == 0 {
			continue
		}

		// Sort the slots by desirability.
		sort.Slice(slots, func(i, j int) bool {
			si, sj := slots[i], slots[j]
			if classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {
				// Prefer a mount that satisfies the
				// desired class.
				return bal.mountsByClass[class][si.mnt]
			} else if wanti, wantj := si.want, sj.want; wanti != wantj {
				// Prefer a mount that will have a
				// replica no matter what we do here
				// -- either because it already has an
				// untrashable replica, or because we
				// already need it to satisfy a
				// different storage class.
				return slots[i].want
			} else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
				// Prefer a better rendezvous
				// position.
				return orderi < orderj
			} else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {
				// Prefer a mount that already has a
				// replica.
				return repli
			} else {
				// If pull/trash turns out to be
				// needed, distribute the
				// new/remaining replicas uniformly
				// across qualifying mounts on a given
				// server.
				return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
			}
		})

		// Servers/mounts/devices (with or without existing
		// replicas) that are part of the best achievable
		// layout for this storage class.
		wantSrv := map[*KeepService]bool{}
		wantMnt := map[*KeepMount]bool{}
		wantDev := map[string]bool{}
		// Positions (with existing replicas) that have been
		// protected (via unsafeToDelete) to ensure we don't
		// reduce replication below desired level when
		// trashing replicas that aren't optimal positions for
		// any storage class.
		protMnt := map[*KeepMount]bool{}

		// trySlot tries using a slot to meet requirements,
		// and returns true if all requirements are met.
		trySlot := func(i int) bool {
			slot := slots[i]
			if wantDev[slot.mnt.DeviceID] {
				// Already allocated a replica to this
				// backend device, possibly on a
				// different server.
				return false
			}
			if len(protMnt) < desired && slot.repl != nil {
				unsafeToDelete[slot.repl.Mtime] = true
				protMnt[slot.mnt] = true
			}
			if len(wantMnt) < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
				slots[i].want = true
				wantSrv[slot.mnt.KeepService] = true
				wantMnt[slot.mnt] = true
				if slot.mnt.DeviceID != "" {
					wantDev[slot.mnt.DeviceID] = true
				}
			}
			return len(protMnt) >= desired && len(wantMnt) >= desired
		}

		// First try to achieve desired replication without
		// using the same server twice.
		done := false
		for i := 0; i < len(slots) && !done; i++ {
			if !wantSrv[slots[i].mnt.KeepService] {
				done = trySlot(i)
			}
		}

		// If that didn't suffice, do another pass without the
		// "distinct services" restriction. (Achieving the
		// desired volume replication on fewer than the
		// desired number of services is better than
		// underreplicating.)
		for i := 0; i < len(slots) && !done; i++ {
			done = trySlot(i)
		}

		if !underreplicated {
			safe := 0
			for _, slot := range slots {
				if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
					continue
				}
				if safe++; safe >= desired {
					break
				}
			}
			underreplicated = safe < desired
		}

		// Set the unachievable flag if there aren't enough
		// slots offering the relevant storage class. (This is
		// as easy as checking slots[desired] because we
		// already sorted the qualifying slots to the front.)
		if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
			cs := classState[class]
			cs.unachievable = true
			classState[class] = cs
		}

		// Avoid deleting wanted replicas from devices that
		// are mounted on multiple servers -- even if they
		// haven't already been added to unsafeToDelete
		// because the servers report different Mtimes.
		for _, slot := range slots {
			if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
				unsafeToDelete[slot.repl.Mtime] = true
			}
		}
	}

	// TODO: If multiple replicas are trashable, prefer the oldest
	// replica that doesn't have a timestamp collision with
	// others.

	countedDev := map[string]bool{}
	var have, want int
	for _, slot := range slots {
		if slot.want {
			want++
		}
		if slot.repl != nil && !countedDev[slot.mnt.DeviceID] {
			have++
			if slot.mnt.DeviceID != "" {
				countedDev[slot.mnt.DeviceID] = true
			}
		}
	}

	var changes []string
	for _, slot := range slots {
		// TODO: request a Touch if Mtime is duplicated.
		var change int
		switch {
		case !underreplicated && slot.repl != nil && !slot.want && !unsafeToDelete[slot.repl.Mtime]:
			slot.mnt.KeepService.AddTrash(Trash{
				SizedDigest: blkid,
				Mtime:       slot.repl.Mtime,
				From:        slot.mnt,
			})
			change = changeTrash
		case len(blk.Replicas) == 0:
			change = changeNone
		case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
			slot.mnt.KeepService.AddPull(Pull{
				SizedDigest: blkid,
				From:        blk.Replicas[0].KeepMount.KeepService,
				To:          slot.mnt,
			})
			change = changePull
		default:
			change = changeStay
		}
		if bal.Dumper != nil {
			var mtime int64
			if slot.repl != nil {
				mtime = slot.repl.Mtime
			}
			srv := slot.mnt.KeepService
			changes = append(changes, fmt.Sprintf("%s:%d/%s=%s,%d", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))
		}
	}

	if bal.Dumper != nil {
		bal.Dumper.Printf("%s have=%d want=%v %s", blkid, have, want, strings.Join(changes, " "))
	}
	return balanceResult{
		blk:        blk,
		blkid:      blkid,
		have:       have,
		want:       want,
		classState: classState,
	}
}

type blocksNBytes struct {
	replicas int
	blocks   int
	bytes    int64
}

func (bb blocksNBytes) String() string {
	return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}
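
// For example:
//
//	blocksNBytes{replicas: 3, blocks: 1, bytes: 67108864}.String()
//	// returns "3 replicas (1 blocks, 67108864 bytes)"
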
type balancerStats struct {
	lost          blocksNBytes
	overrep       blocksNBytes
	unref         blocksNBytes
	garbage       blocksNBytes
	underrep      blocksNBytes
	unachievable  blocksNBytes
	justright     blocksNBytes
	desired       blocksNBytes
	current       blocksNBytes
	pulls         int
	trashes       int
	replHistogram []int
	classStats    map[string]replicationStats
}

type replicationStats struct {
	desired      blocksNBytes
	surplus      blocksNBytes
	short        blocksNBytes
	unachievable blocksNBytes
}

type balancedBlockState struct {
	desired      int
	surplus      int
	unachievable bool
}

func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
	var s balancerStats
	s.replHistogram = make([]int, 2)
	s.classStats = make(map[string]replicationStats, len(bal.classes))
	for result := range results {
		surplus := result.have - result.want
		bytes := result.blkid.Size()

		for class, state := range result.classState {
			cs := s.classStats[class]
			if state.unachievable {
				cs.unachievable.blocks++
				cs.unachievable.bytes += bytes
			}
			if state.desired > 0 {
				cs.desired.replicas += state.desired
				cs.desired.blocks++
				cs.desired.bytes += bytes * int64(state.desired)
			}
			if state.surplus > 0 {
				cs.surplus.replicas += state.surplus
				cs.surplus.blocks++
				cs.surplus.bytes += bytes * int64(state.surplus)
			} else if state.surplus < 0 {
				cs.short.replicas += -state.surplus
				cs.short.blocks++
				cs.short.bytes += bytes * int64(-state.surplus)
			}
			s.classStats[class] = cs
		}

		switch {
		case result.have == 0 && result.want > 0:
			s.lost.replicas -= surplus
			s.lost.blocks++
			s.lost.bytes += bytes * int64(-surplus)
		case surplus < 0:
			s.underrep.replicas -= surplus
			s.underrep.blocks++
			s.underrep.bytes += bytes * int64(-surplus)
		case surplus > 0 && result.want == 0:
			counter := &s.garbage
			for _, r := range result.blk.Replicas {
				if r.Mtime >= bal.MinMtime {
					counter = &s.unref
					break
				}
			}
			counter.replicas += surplus
			counter.blocks++
			counter.bytes += bytes * int64(surplus)
		case surplus > 0:
			s.overrep.replicas += surplus
			s.overrep.blocks++
			s.overrep.bytes += bytes * int64(len(result.blk.Replicas)-result.want)
		default:
			s.justright.replicas += result.want
			s.justright.blocks++
			s.justright.bytes += bytes * int64(result.want)
		}

		if result.want > 0 {
			s.desired.replicas += result.want
			s.desired.blocks++
			s.desired.bytes += bytes * int64(result.want)
		}
		if len(result.blk.Replicas) > 0 {
			s.current.replicas += len(result.blk.Replicas)
			s.current.blocks++
			s.current.bytes += bytes * int64(len(result.blk.Replicas))
		}

		for len(s.replHistogram) <= len(result.blk.Replicas) {
			s.replHistogram = append(s.replHistogram, 0)
		}
		s.replHistogram[len(result.blk.Replicas)]++
	}
	for _, srv := range bal.KeepServices {
		s.pulls += len(srv.ChangeSet.Pulls)
		s.trashes += len(srv.ChangeSet.Trashes)
	}
	bal.stats = s
}
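
// For example (working through the arithmetic above): a block with
// result.have==1 and result.want==3 has surplus -2, so it adds 2
// replicas, 1 block, and 2*result.blkid.Size() bytes to s.underrep.
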
// PrintStatistics writes statistics about the computed changes to
// bal.Logger. It should not be called until ComputeChangeSets has
// finished.
func (bal *Balancer) PrintStatistics() {
	bal.logf("===")
	bal.logf("%s lost (0=have<want)", bal.stats.lost)
	bal.logf("%s underreplicated (0<have<want)", bal.stats.underrep)
	bal.logf("%s just right (have=want)", bal.stats.justright)
	bal.logf("%s overreplicated (have>want>0)", bal.stats.overrep)
	bal.logf("%s unreferenced (have>want=0, new)", bal.stats.unref)
	bal.logf("%s garbage (have>want=0, old)", bal.stats.garbage)
	for _, class := range bal.classes {
		cs := bal.stats.classStats[class]
		bal.logf("===")
		bal.logf("storage class %q: %s desired", class, cs.desired)
		bal.logf("storage class %q: %s short", class, cs.short)
		bal.logf("storage class %q: %s surplus", class, cs.surplus)
		bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
	}
	bal.logf("===")
	bal.logf("%s total commitment (excluding unreferenced)", bal.stats.desired)
	bal.logf("%s total usage", bal.stats.current)
	bal.logf("===")
	for _, srv := range bal.KeepServices {
		bal.logf("%s: %v\n", srv, srv.ChangeSet)
	}
	bal.logf("===")
	bal.printHistogram(60)
	bal.logf("===")
}

func (bal *Balancer) printHistogram(hashColumns int) {
	bal.logf("Replication level distribution (counting N replicas on a single server as N):")
	maxCount := 0
	for _, count := range bal.stats.replHistogram {
		if maxCount < count {
			maxCount = count
		}
	}
	hashes := strings.Repeat("#", hashColumns)
	countWidth := 1 + int(math.Log10(float64(maxCount+1)))
	scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
	for repl, count := range bal.stats.replHistogram {
		nHashes := int(scaleCount * math.Log10(float64(count+1)))
		bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
	}
}
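
// For example, with hashColumns=60 the histogram lines look something
// like this (counts illustrative):
//
//	 0:     3 #######
//	 2: 80000 ##########################################################
//	 3:   220 ############################
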
// CheckSanityLate checks for configuration and runtime errors after
// GetCurrentState() and ComputeChangeSets() have finished.
//
// If it returns an error, it is dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityLate() error {
	if bal.errors != nil {
		for _, err := range bal.errors {
			bal.logf("deferred error: %v", err)
		}
		return fmt.Errorf("cannot proceed safely after deferred errors")
	}

	if bal.collScanned == 0 {
		return fmt.Errorf("received zero collections")
	}

	anyDesired := false
	bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
		for _, desired := range blk.Desired {
			if desired > 0 {
				anyDesired = true
				break
			}
		}
	})
	if !anyDesired {
		return fmt.Errorf("zero blocks have desired replication>0")
	}

	if dr := bal.DefaultReplication; dr < 1 {
		return fmt.Errorf("Default replication (%d) is less than 1", dr)
	}

	// TODO: no two services have identical indexes
	// TODO: no collisions (same md5, different size)

	return nil
}

// CommitPulls sends the computed lists of pull requests to the
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
func (bal *Balancer) CommitPulls(c *arvados.Client) error {
	return bal.commitAsync(c, "send pull list",
		func(srv *KeepService) error {
			return srv.CommitPulls(c)
		})
}

// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
func (bal *Balancer) CommitTrash(c *arvados.Client) error {
	return bal.commitAsync(c, "send trash list",
		func(srv *KeepService) error {
			return srv.CommitTrash(c)
		})
}

func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {
	errs := make(chan error)
	for _, srv := range bal.KeepServices {
		go func(srv *KeepService) {
			var err error
			defer func() { errs <- err }()
			label := fmt.Sprintf("%s: %v", srv, label)
			defer timeMe(bal.Logger, label)()
			err = f(srv)
			if err != nil {
				err = fmt.Errorf("%s: %v", label, err)
			}
		}(srv)
	}
	var lastErr error
	for range bal.KeepServices {
		if err := <-errs; err != nil {
			bal.logf("%v", err)
			lastErr = err
		}
	}
	return lastErr
}

func (bal *Balancer) logf(f string, args ...interface{}) {
	if bal.Logger != nil {
		bal.Logger.Printf(f, args...)
	}
}

// Rendezvous hash sort function. Less efficient than sorting on
// precomputed rendezvous hashes, but also rarely used.
func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
	a := md5.Sum([]byte(string(blkid[:32]) + i))
	b := md5.Sum([]byte(string(blkid[:32]) + j))
	return bytes.Compare(a[:], b[:]) < 0
}
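
// For example (hypothetical device IDs): for a given block,
// rendezvousLess("dev-a", "dev-b", blkid) compares
// md5(block-hash + "dev-a") against md5(block-hash + "dev-b")
// bytewise, giving each block its own stable, pseudo-random ordering
// of devices.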