// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
	"github.com/sirupsen/logrus"
// Balancer compares the contents of keepstore servers with the
// collections stored in Arvados, and issues pull/trash requests
// needed to get (closer to) the optimal data layout.
//
// In the optimal data layout: every data block referenced by a
// collection is replicated at least as many times as desired by the
// collection; there are no unreferenced data blocks older than
// BlobSignatureTTL; and all N existing replicas of a given data block
// are in the N best positions in rendezvous probe order.
type Balancer struct {
	Logger logrus.FieldLogger
	Dumper logrus.FieldLogger

	KeepServices       map[string]*KeepService
	DefaultReplication int

	mountsByClass map[string]map[*KeepMount]bool
	serviceRoots  map[string]string
// Run performs a balance operation using the given cluster config and
// runOptions, and returns RunOptions suitable for passing to a
// subsequent balance operation.
//
// Run should only be called once on a given Balancer object.
//
// Typical usage:
//
//	runOptions, err = (&Balancer{}).Run(client, cluster, runOptions)
func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
	nextRunOptions = runOptions
	defer bal.time("sweep", "wall clock time to run one full sweep")()
	var lbFile *os.File
	if bal.LostBlocksFile != "" {
		tmpfn := bal.LostBlocksFile + ".tmp"
		lbFile, err = os.OpenFile(tmpfn, os.O_CREATE|os.O_WRONLY, 0777)
		if err != nil {
			return
		}
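		// Take a non-blocking exclusive lock on the tempfile,
		// so a concurrently running keep-balance process fails
		// fast here instead of interleaving writes with this
		// one.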
		err = syscall.Flock(int(lbFile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
		if err != nil {
			return
		}
		defer func() {
			// Remove the tempfile only if we didn't get
			// as far as successfully renaming it.
			if lbFile != nil {
				os.Remove(tmpfn)
			}
		}()
		bal.lostBlocks = lbFile
	} else {
		bal.lostBlocks = ioutil.Discard
	}
	diskService := []string{"disk"}
	err = bal.DiscoverKeepServices(client, diskService)
	if err != nil {
		return
	}

	for _, srv := range bal.KeepServices {
		err = srv.discoverMounts(client)
		if err != nil {
			return
		}
	}
	bal.cleanupMounts()

	if err = bal.CheckSanityEarly(client); err != nil {
		return
	}
	rs := bal.rendezvousState()
	if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
		if runOptions.SafeRendezvousState != "" {
			bal.logf("notice: KeepServices list has changed since last run")
		}
		bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
		if err = bal.ClearTrashLists(client); err != nil {
			return
		}
		// The current rendezvous state becomes "safe" (i.e.,
		// OK to compute changes for that state without
		// clearing existing trash lists) only now, after we
		// succeed in clearing existing trash lists.
		nextRunOptions.SafeRendezvousState = rs
	}
	if err = bal.GetCurrentState(client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
		return
	}
	bal.ComputeChangeSets()
	bal.PrintStatistics()
	if err = bal.CheckSanityLate(); err != nil {
		return
	}
	if lbFile != nil {
		err = os.Rename(bal.LostBlocksFile+".tmp", bal.LostBlocksFile)
		if err != nil {
			return
		}
		lbFile = nil
	}

	if runOptions.CommitPulls {
		err = bal.CommitPulls(client)
		if err != nil {
			// Skip trash if we can't pull. (Too cautious?)
			return
		}
	}
	if runOptions.CommitTrash {
		err = bal.CommitTrash(client)
		if err != nil {
			return
		}
	}
	return
}
// SetKeepServices sets the list of KeepServices to operate on.
func (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {
	bal.KeepServices = make(map[string]*KeepService)
	for _, srv := range srvList.Items {
		bal.KeepServices[srv.UUID] = &KeepService{
			ChangeSet: &ChangeSet{},
		}
	}
	return nil
}
// DiscoverKeepServices sets the list of KeepServices by calling the
// API to get a list of all services, and selecting the ones whose
// ServiceType is in okTypes.
func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) error {
	bal.KeepServices = make(map[string]*KeepService)
	ok := make(map[string]bool)
	for _, t := range okTypes {
		ok[t] = true
	}
	return c.EachKeepService(func(srv arvados.KeepService) error {
		if ok[srv.ServiceType] {
			bal.KeepServices[srv.UUID] = &KeepService{
				ChangeSet: &ChangeSet{},
			}
		} else {
			bal.logf("skipping %v with service type %q", srv.UUID, srv.ServiceType)
		}
		return nil
	})
}
func (bal *Balancer) cleanupMounts() {
	rwdev := map[string]*KeepService{}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			if !mnt.ReadOnly && mnt.DeviceID != "" {
				rwdev[mnt.DeviceID] = srv
			}
		}
	}
	// Drop the readonly mounts whose device is mounted RW
	for _, srv := range bal.KeepServices {
		var dedup []*KeepMount
		for _, mnt := range srv.mounts {
			if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
				bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
				continue
			}
			dedup = append(dedup, mnt)
		}
		srv.mounts = dedup
	}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			if mnt.Replication <= 0 {
				bal.logf("%s: mount %s reports replication=%d, using replication=1", srv, mnt.UUID, mnt.Replication)
				mnt.Replication = 1
			}
		}
	}
}
// CheckSanityEarly checks for configuration and runtime errors that
// can be detected before GetCurrentState() and ComputeChangeSets()
// are called.
//
// If it returns an error, it is pointless to run GetCurrentState or
// ComputeChangeSets: after doing so, the statistics would be
// meaningless and it would be dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
	u, err := c.CurrentUser()
	if err != nil {
		return fmt.Errorf("CurrentUser(): %v", err)
	}
	if !u.IsActive || !u.IsAdmin {
		return fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
	}
	for _, srv := range bal.KeepServices {
		if srv.ServiceType == "proxy" {
			return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
		}
	}
	var checkPage arvados.CollectionList
	if err = c.RequestAndDecode(&checkPage, "GET", "arvados/v1/collections", nil, arvados.ResourceListParams{
		IncludeOldVersions: true,
		Filters: []arvados.Filter{{
			Attr:     "modified_at",
			Operator: "=",
			Operand:  nil,
		}},
	}); err != nil {
		return err
	} else if n := checkPage.ItemsAvailable; n > 0 {
		return fmt.Errorf("%d collections exist with null modified_at; cannot fetch reliably", n)
	}

	return nil
}
// rendezvousState returns a fingerprint (e.g., a sorted list of
// UUID+host+port) of the current set of keep services.
func (bal *Balancer) rendezvousState() string {
	srvs := make([]string, 0, len(bal.KeepServices))
	for _, srv := range bal.KeepServices {
		srvs = append(srvs, srv.String())
	}
	sort.Strings(srvs)
	return strings.Join(srvs, "; ")
}
// ClearTrashLists sends an empty trash list to each keep
// service. Calling this before GetCurrentState avoids races.
//
// When a block appears in an index, we assume that replica will still
// exist after we delete other replicas on other servers. However,
// it's possible that a previous rebalancing operation made different
// decisions (e.g., servers were added/removed, and rendezvous order
// changed). In this case, the replica might already be on that
// server's trash list, and it might be deleted before we send a
// replacement trash list.
//
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
	for _, srv := range bal.KeepServices {
		srv.ChangeSet = &ChangeSet{}
	}
	return bal.CommitTrash(c)
}
// GetCurrentState determines the current replication state, and the
// desired replication level, for every block that is either
// retrievable or referenced.
//
// It determines the current replication state by reading the block index
// from every known Keep service.
//
// It determines the desired replication level by retrieving all
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
	defer bal.time("get_state", "wall clock time to get current state")()
	bal.BlockStateMap = NewBlockStateMap()

	dd, err := c.DiscoveryDocument()
	if err != nil {
		return err
	}
	bal.DefaultReplication = dd.DefaultCollectionReplication
	bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
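	// MinMtime is BlobSignatureTTL seconds ago (the TTL is given
	// in seconds; UnixNano is nanoseconds, hence the 1e9 factor).
	// Replicas newer than this are never trashed: they may be
	// unreferenced only because a client is still in the process
	// of writing the collection that uses them.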
	errs := make(chan error, 1)
	wg := sync.WaitGroup{}

	// When a device is mounted more than once, we will get its
	// index only once, and call AddReplicas on all of the mounts.
	// equivMount keys are the mounts that will be indexed, and
	// each value is a list of mounts to apply the received index
	// to.
	equivMount := map[*KeepMount][]*KeepMount{}
	// deviceMount maps each device ID to the one mount that will
	// be indexed for that device.
	deviceMount := map[string]*KeepMount{}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			equiv := deviceMount[mnt.DeviceID]
			if equiv == nil {
				equiv = mnt
				if mnt.DeviceID != "" {
					deviceMount[mnt.DeviceID] = equiv
				}
			}
			equivMount[equiv] = append(equivMount[equiv], mnt)
		}
	}
	// Start one goroutine for each (non-redundant) mount:
	// retrieve the index, and add the returned blocks to the
	// BlockStateMap.
	for _, mounts := range equivMount {
		wg.Add(1)
		go func(mounts []*KeepMount) {
			defer wg.Done()
			bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
			idx, err := mounts[0].KeepService.IndexMount(c, mounts[0].UUID, "")
			if err != nil {
				select {
				case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
				default:
				}
				return
			}
			if len(errs) > 0 {
				// Some other goroutine encountered an
				// error -- any further effort here
				// would be wasted.
				return
			}
			for _, mount := range mounts {
				bal.logf("%s: add %d entries to map", mount, len(idx))
				bal.BlockStateMap.AddReplicas(mount, idx)
				bal.logf("%s: added %d entries to map at %dx (%d replicas)", mount, len(idx), mount.Replication, len(idx)*mount.Replication)
			}
			bal.logf("mount %s: index done", mounts[0])
		}(mounts)
	}
	// collQ buffers incoming collections so we can start fetching
	// the next page without waiting for the current page to
	// finish processing.
	collQ := make(chan arvados.Collection, bufs)

	// Start a goroutine to process collections. (We could use a
	// worker pool here, but even with a single worker we already
	// process collections much faster than we can retrieve them.)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for coll := range collQ {
			err := bal.addCollection(coll)
			if err != nil || len(errs) > 0 {
				select {
				case errs <- err:
				default:
				}
				// Drain collQ so the producing
				// goroutine isn't left blocked.
				for range collQ {
				}
				return
			}
			bal.collScanned++
		}
	}()
	// Start a goroutine to retrieve all collections from the
	// Arvados database and send them to collQ for processing.
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer close(collQ)
		err = EachCollection(c, pageSize,
			func(coll arvados.Collection) error {
				collQ <- coll
				if len(errs) > 0 {
					// Some other GetCurrentState
					// error happened: no point
					// continuing. The error text
					// is unused; a non-nil return
					// just stops EachCollection.
					return fmt.Errorf("")
				}
				return nil
			}, func(done, total int) {
				bal.logf("collections: %d/%d", done, total)
			})
		if err != nil {
			select {
			case errs <- err:
			default:
			}
		}
	}()

	wg.Wait()
	if len(errs) > 0 {
		return <-errs
	}
	return nil
}
func (bal *Balancer) addCollection(coll arvados.Collection) error {
	blkids, err := coll.SizedDigests()
	if err != nil {
		return fmt.Errorf("%v: %v", coll.UUID, err)
	}
	repl := bal.DefaultReplication
	if coll.ReplicationDesired != nil {
		repl = *coll.ReplicationDesired
	}
	debugf("%v: %d block x%d", coll.UUID, len(blkids), repl)
	// Pass pdh to IncreaseDesired only if LostBlocksFile is being
	// written -- otherwise it's just a waste of memory.
	pdh := ""
	if bal.LostBlocksFile != "" {
		pdh = coll.PortableDataHash
	}
	bal.BlockStateMap.IncreaseDesired(pdh, coll.StorageClassesDesired, repl, blkids)
	return nil
}
// ComputeChangeSets compares, for each known block, the current and
// desired replication states. If it is possible to get closer to the
// desired state by copying or deleting blocks, it adds those changes
// to the relevant KeepServices' ChangeSets.
//
// It does not actually apply any of the computed changes.
func (bal *Balancer) ComputeChangeSets() {
	// This just calls balanceBlock() once for each block, using a
	// pool of worker goroutines.
	defer bal.time("changeset_compute", "wall clock time to compute changesets")()
	bal.setupLookupTables()
	type balanceTask struct {
		blkid arvados.SizedDigest
		blk   *BlockState
	}
	workers := runtime.GOMAXPROCS(-1)
	todo := make(chan balanceTask, workers)
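	// Producer/consumer: the Apply goroutine below feeds one task
	// per block into todo, and the worker goroutines drain it in
	// parallel, sending their results on the results channel.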
	go func() {
		bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
			todo <- balanceTask{
				blkid: blkid,
				blk:   blk,
			}
		})
		close(todo)
	}()
	results := make(chan balanceResult, workers)
	go func() {
		var wg sync.WaitGroup
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for work := range todo {
					results <- bal.balanceBlock(work.blkid, work.blk)
				}
			}()
		}
		wg.Wait()
		close(results)
	}()
	bal.collectStatistics(results)
}
func (bal *Balancer) setupLookupTables() {
	bal.serviceRoots = make(map[string]string)
	bal.classes = defaultClasses
	bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
	bal.mounts = 0
	for _, srv := range bal.KeepServices {
		bal.serviceRoots[srv.UUID] = srv.UUID
		for _, mnt := range srv.mounts {
			bal.mounts++

			// All mounts on a read-only service are
			// effectively read-only.
			mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly

			if len(mnt.StorageClasses) == 0 {
				bal.mountsByClass["default"][mnt] = true
				continue
			}
			for class := range mnt.StorageClasses {
				if mbc := bal.mountsByClass[class]; mbc == nil {
					bal.classes = append(bal.classes, class)
					bal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}
				} else {
					mbc[mnt] = true
				}
			}
		}
	}

	// Consider classes in lexicographic order to avoid flapping
	// between balancing runs. The outcome of the "prefer a mount
	// we're already planning to use for a different storage
	// class" case in balanceBlock depends on the order classes
	// are considered.
	sort.Strings(bal.classes)
}
var changeName = map[int]string{
	changeStay:  "stay",
	changePull:  "pull",
	changeTrash: "trash",
	changeNone:  "none",
}
type balanceResult struct {
	blk        *BlockState
	blkid      arvados.SizedDigest
	have, want int
	classState map[string]balancedBlockState
}
// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
	debugf("balanceBlock: %v %+v", blkid, blk)

	type slot struct {
		mnt  *KeepMount // never nil
		repl *Replica   // replica already stored here (or nil)
		want bool       // we should pull/leave a replica here
	}

	// Build a list of all slots (one per mounted volume).
	slots := make([]slot, 0, bal.mounts)
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			var repl *Replica
			for r := range blk.Replicas {
				if blk.Replicas[r].KeepMount == mnt {
					repl = &blk.Replicas[r]
					break
				}
			}
			// Initial value of "want" is "have, and can't
			// delete". These untrashable replicas get
			// prioritized when sorting slots: otherwise,
			// non-optimal readonly copies would cause us
			// to overreplicate.
			slots = append(slots, slot{
				mnt:  mnt,
				repl: repl,
				want: repl != nil && mnt.ReadOnly,
			})
		}
	}
	uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
	srvRendezvous := make(map[*KeepService]int, len(uuids))
	for i, uuid := range uuids {
		srv := bal.KeepServices[uuid]
		srvRendezvous[srv] = i
	}
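	// (NewRootSorter ranks servers by hashing the block ID
	// together with each candidate's sort key -- rendezvousLess at
	// the bottom of this file performs the equivalent pairwise
	// comparison.)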
	// Below we set underreplicated=true if we find any storage
	// class that's currently underreplicated -- in that case we
	// won't want to trash any replicas.
	underreplicated := false

	classState := make(map[string]balancedBlockState, len(bal.classes))
	unsafeToDelete := make(map[int64]bool, len(slots))
	for _, class := range bal.classes {
		desired := blk.Desired[class]

		countedDev := map[string]bool{}
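		// countedDev ensures that a device mounted on more
		// than one server contributes its replication to
		// "have" only once.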
		have := 0
		for _, slot := range slots {
			if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
				have += slot.mnt.Replication
				if slot.mnt.DeviceID != "" {
					countedDev[slot.mnt.DeviceID] = true
				}
			}
		}
		classState[class] = balancedBlockState{
			desired: desired,
			surplus: have - desired,
		}
		// Sort the slots by desirability.
		sort.Slice(slots, func(i, j int) bool {
			si, sj := slots[i], slots[j]
			if classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {
				// Prefer a mount that satisfies the
				// desired class.
				return bal.mountsByClass[class][si.mnt]
			} else if si.want != sj.want {
				// Prefer a mount that will have a
				// replica no matter what we do here
				// -- either because it already has an
				// untrashable replica, or because we
				// already need it to satisfy a
				// different storage class.
				return si.want
			} else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
				// Prefer a better rendezvous
				// order.
				return orderi < orderj
			} else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {
				// Prefer a mount that already has a
				// replica.
				return repli
			} else {
				// If pull/trash turns out to be
				// needed, distribute the
				// new/remaining replicas uniformly
				// across qualifying mounts on a given
				// server.
				return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
			}
		})
		// Servers/mounts/devices (with or without existing
		// replicas) that are part of the best achievable
		// layout for this storage class.
		wantSrv := map[*KeepService]bool{}
		wantMnt := map[*KeepMount]bool{}
		wantDev := map[string]bool{}

		// Positions (with existing replicas) that have been
		// protected (via unsafeToDelete) to ensure we don't
		// reduce replication below desired level when
		// trashing replicas that aren't optimal positions for
		// any storage class.
		protMnt := map[*KeepMount]bool{}

		// Replication planned so far (corresponds to wantMnt).
		replWant := 0
		// Protected replication (corresponds to protMnt).
		replProt := 0
		// trySlot tries using a slot to meet requirements,
		// and returns true if all requirements are met.
		trySlot := func(i int) bool {
			slot := slots[i]
			if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
				// Already allocated a replica to this
				// backend device, possibly on a
				// different server.
				return false
			}
			if replProt < desired && slot.repl != nil && !protMnt[slot.mnt] {
				unsafeToDelete[slot.repl.Mtime] = true
				protMnt[slot.mnt] = true
				replProt += slot.mnt.Replication
			}
			if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
				slots[i].want = true
				wantSrv[slot.mnt.KeepService] = true
				wantMnt[slot.mnt] = true
				if slot.mnt.DeviceID != "" {
					wantDev[slot.mnt.DeviceID] = true
				}
				replWant += slot.mnt.Replication
			}
			return replProt >= desired && replWant >= desired
		}
		// First try to achieve desired replication without
		// using the same server twice.
		done := false
		for i := 0; i < len(slots) && !done; i++ {
			if !wantSrv[slots[i].mnt.KeepService] {
				done = trySlot(i)
			}
		}

		// If that didn't suffice, do another pass without the
		// "distinct services" restriction. (Achieving the
		// desired volume replication on fewer than the
		// desired number of services is better than
		// underreplicating.)
		for i := 0; i < len(slots) && !done; i++ {
			done = trySlot(i)
		}

		if !underreplicated {
			safe := 0
			for _, slot := range slots {
				if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
					continue
				}
				if safe += slot.mnt.Replication; safe >= desired {
					break
				}
			}
			underreplicated = safe < desired
		}
		// Set the unachievable flag if there aren't enough
		// slots offering the relevant storage class. (This is
		// as easy as checking slots[desired] because we
		// already sorted the qualifying slots to the front.)
		if desired >= len(slots) || !bal.mountsByClass[class][slots[desired].mnt] {
			cs := classState[class]
			cs.unachievable = true
			classState[class] = cs
		}
		// Avoid deleting wanted replicas from devices that
		// are mounted on multiple servers -- even if they
		// haven't already been added to unsafeToDelete
		// because the servers report different Mtimes.
		for _, slot := range slots {
			if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
				unsafeToDelete[slot.repl.Mtime] = true
			}
		}
	}
	// TODO: If multiple replicas are trashable, prefer the oldest
	// replica that doesn't have a timestamp collision with
	// others.

	countedDev := map[string]bool{}
	var have, want int
	for _, slot := range slots {
		if countedDev[slot.mnt.DeviceID] {
			continue
		}
		if slot.want {
			want += slot.mnt.Replication
		}
		if slot.repl != nil {
			have += slot.mnt.Replication
		}
		if slot.mnt.DeviceID != "" {
			countedDev[slot.mnt.DeviceID] = true
		}
	}
	var changes []string
	for _, slot := range slots {
		// TODO: request a Touch if Mtime is duplicated.
		var change int
		switch {
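		// Trash a replica only when every storage class is
		// fully replicated, no slot wants this copy, and the
		// replica is old enough that it cannot belong to a
		// collection that is still being written.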
		case !underreplicated && !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime && !unsafeToDelete[slot.repl.Mtime]:
			slot.mnt.KeepService.AddTrash(Trash{
				SizedDigest: blkid,
				Mtime:       slot.repl.Mtime,
				From:        slot.mnt,
			})
			change = changeTrash
		case len(blk.Replicas) > 0 && slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
			slot.mnt.KeepService.AddPull(Pull{
				SizedDigest: blkid,
				From:        blk.Replicas[0].KeepMount.KeepService,
				To:          slot.mnt,
			})
			change = changePull
		case slot.repl != nil:
			change = changeStay
		}
		if bal.Dumper != nil {
			var mtime int64
			if slot.repl != nil {
				mtime = slot.repl.Mtime
			}
			srv := slot.mnt.KeepService
			changes = append(changes, fmt.Sprintf("%s:%d/%s=%s,%d", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))
		}
	}
	if bal.Dumper != nil {
		bal.Dumper.Printf("%s refs=%d have=%d want=%v %v %v", blkid, blk.RefCount, have, want, blk.Desired, changes)
	}
	return balanceResult{
		blk:        blk,
		blkid:      blkid,
		have:       have,
		want:       want,
		classState: classState,
	}
}
type blocksNBytes struct {
	replicas, blocks int
	bytes            int64
}

func (bb blocksNBytes) String() string {
	return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}
type balancerStats struct {
	lost          blocksNBytes
	overrep       blocksNBytes
	unref         blocksNBytes
	garbage       blocksNBytes
	underrep      blocksNBytes
	unachievable  blocksNBytes
	justright     blocksNBytes
	desired       blocksNBytes
	current       blocksNBytes
	pulls         int
	trashes       int
	replHistogram []int
	classStats    map[string]replicationStats

	// collectionBytes / collectionBlockBytes = deduplication ratio
	collectionBytes      int64 // sum(bytes in referenced blocks) across all collections
	collectionBlockBytes int64 // sum(block size) across all blocks referenced by collections
	collectionBlockRefs  int64 // sum(number of blocks referenced) across all collections
	collectionBlocks     int64 // number of blocks referenced by any collection
}
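// Example: if collections reference 10 GiB of block data in total
// (collectionBytes) but those blocks occupy only 4 GiB of unique
// backend storage (collectionBlockBytes), dedupByteRatio below
// reports 10/4 = 2.5.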
func (s *balancerStats) dedupByteRatio() float64 {
	if s.collectionBlockBytes == 0 {
		return 0
	}
	return float64(s.collectionBytes) / float64(s.collectionBlockBytes)
}

func (s *balancerStats) dedupBlockRatio() float64 {
	if s.collectionBlocks == 0 {
		return 0
	}
	return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
}
type replicationStats struct {
	desired      blocksNBytes
	surplus      blocksNBytes
	short        blocksNBytes
	unachievable blocksNBytes
}

type balancedBlockState struct {
	desired      int
	surplus      int
	unachievable bool
}
func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
	var s balancerStats
	s.replHistogram = make([]int, 2)
	s.classStats = make(map[string]replicationStats, len(bal.classes))
	for result := range results {
		surplus := result.have - result.want
		bytes := result.blkid.Size()
		if rc := int64(result.blk.RefCount); rc > 0 {
			s.collectionBytes += rc * bytes
			s.collectionBlockBytes += bytes
			s.collectionBlockRefs += rc
			s.collectionBlocks++
		}
		for class, state := range result.classState {
			cs := s.classStats[class]
			if state.unachievable {
				cs.unachievable.blocks++
				cs.unachievable.bytes += bytes
			}
			if state.desired > 0 {
				cs.desired.replicas += state.desired
				cs.desired.blocks++
				cs.desired.bytes += bytes * int64(state.desired)
			}
			if state.surplus > 0 {
				cs.surplus.replicas += state.surplus
				cs.surplus.blocks++
				cs.surplus.bytes += bytes * int64(state.surplus)
			} else if state.surplus < 0 {
				cs.short.replicas += -state.surplus
				cs.short.blocks++
				cs.short.bytes += bytes * int64(-state.surplus)
			}
			s.classStats[class] = cs
		}
		switch {
		case result.have == 0 && result.want > 0:
			s.lost.replicas -= surplus
			s.lost.blocks++
			s.lost.bytes += bytes * int64(-surplus)
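			// Report the lost block: one line per block,
			// listing the block hash followed by the
			// portable data hashes of the collections
			// that reference it.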
			fmt.Fprintf(bal.lostBlocks, "%s", strings.SplitN(string(result.blkid), "+", 2)[0])
			for pdh := range result.blk.Refs {
				fmt.Fprintf(bal.lostBlocks, " %s", pdh)
			}
			fmt.Fprint(bal.lostBlocks, "\n")
		case surplus < 0:
			s.underrep.replicas -= surplus
			s.underrep.blocks++
			s.underrep.bytes += bytes * int64(-surplus)
		case surplus > 0 && result.want == 0:
			counter := &s.garbage
			for _, r := range result.blk.Replicas {
				if r.Mtime >= bal.MinMtime {
					counter = &s.unref
					break
				}
			}
			counter.replicas += surplus
			counter.blocks++
			counter.bytes += bytes * int64(surplus)
		case surplus > 0:
			s.overrep.replicas += surplus
			s.overrep.blocks++
			s.overrep.bytes += bytes * int64(result.have-result.want)
		default:
			s.justright.replicas += result.want
			s.justright.blocks++
			s.justright.bytes += bytes * int64(result.want)
		}

		if result.want > 0 {
			s.desired.replicas += result.want
			s.desired.blocks++
			s.desired.bytes += bytes * int64(result.want)
		}
		if result.have > 0 {
			s.current.replicas += result.have
			s.current.blocks++
			s.current.bytes += bytes * int64(result.have)
		}

		for len(s.replHistogram) <= result.have {
			s.replHistogram = append(s.replHistogram, 0)
		}
		s.replHistogram[result.have]++
	}
	for _, srv := range bal.KeepServices {
		s.pulls += len(srv.ChangeSet.Pulls)
		s.trashes += len(srv.ChangeSet.Trashes)
	}
	bal.stats = s
	bal.Metrics.UpdateStats(s)
}
// PrintStatistics writes statistics about the computed changes to
// bal.Logger. It should not be called until ComputeChangeSets has
// finished.
func (bal *Balancer) PrintStatistics() {
989 bal.logf("%s lost (0=have<want)", bal.stats.lost)
990 bal.logf("%s underreplicated (0<have<want)", bal.stats.underrep)
991 bal.logf("%s just right (have=want)", bal.stats.justright)
992 bal.logf("%s overreplicated (have>want>0)", bal.stats.overrep)
993 bal.logf("%s unreferenced (have>want=0, new)", bal.stats.unref)
994 bal.logf("%s garbage (have>want=0, old)", bal.stats.garbage)
995 for _, class := range bal.classes {
996 cs := bal.stats.classStats[class]
998 bal.logf("storage class %q: %s desired", class, cs.desired)
999 bal.logf("storage class %q: %s short", class, cs.short)
1000 bal.logf("storage class %q: %s surplus", class, cs.surplus)
1001 bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
1004 bal.logf("%s total commitment (excluding unreferenced)", bal.stats.desired)
1005 bal.logf("%s total usage", bal.stats.current)
1007 for _, srv := range bal.KeepServices {
1008 bal.logf("%s: %v\n", srv, srv.ChangeSet)
1011 bal.printHistogram(60)
func (bal *Balancer) printHistogram(hashColumns int) {
	bal.logf("Replication level distribution (counting N replicas on a single server as N):")
	maxCount := 0
	for _, count := range bal.stats.replHistogram {
		if maxCount < count {
			maxCount = count
		}
	}
	hashes := strings.Repeat("#", hashColumns)
	countWidth := 1 + int(math.Log10(float64(maxCount+1)))
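	// Bar lengths are log-scaled so that both rare and very
	// common replication levels stay visible; the most common
	// level gets a bar of roughly hashColumns characters.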
	scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
	for repl, count := range bal.stats.replHistogram {
		nHashes := int(scaleCount * math.Log10(float64(count+1)))
		bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
	}
}
// CheckSanityLate checks for configuration and runtime errors after
// GetCurrentState() and ComputeChangeSets() have finished.
//
// If it returns an error, it is dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityLate() error {
	if bal.errors != nil {
		for _, err := range bal.errors {
			bal.logf("deferred error: %v", err)
		}
		return fmt.Errorf("cannot proceed safely after deferred errors")
	}

	if bal.collScanned == 0 {
		return fmt.Errorf("received zero collections")
	}

	anyDesired := false
	bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
		for _, desired := range blk.Desired {
			if desired > 0 {
				anyDesired = true
				break
			}
		}
	})
	if !anyDesired {
		return fmt.Errorf("zero blocks have desired replication>0")
	}

	if dr := bal.DefaultReplication; dr < 1 {
		return fmt.Errorf("default replication (%d) is less than 1", dr)
	}

	// TODO: no two services have identical indexes
	// TODO: no collisions (same md5, different size)
	return nil
}
// CommitPulls sends the computed lists of pull requests to the
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
func (bal *Balancer) CommitPulls(c *arvados.Client) error {
	defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
	return bal.commitAsync(c, "send pull list",
		func(srv *KeepService) error {
			return srv.CommitPulls(c)
		})
}
// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
func (bal *Balancer) CommitTrash(c *arvados.Client) error {
	defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
	return bal.commitAsync(c, "send trash list",
		func(srv *KeepService) error {
			return srv.CommitTrash(c)
		})
}
func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {
	errs := make(chan error)
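	// Fan out: one goroutine per server. Each goroutine sends
	// exactly one value (possibly nil) on errs, and the loop at
	// the bottom receives exactly len(bal.KeepServices) values,
	// so neither side can leak.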
	for _, srv := range bal.KeepServices {
		go func(srv *KeepService) {
			var err error
			defer func() { errs <- err }()
			label := fmt.Sprintf("%s: %v", srv, label)
			err = f(srv)
			if err != nil {
				err = fmt.Errorf("%s: %v", label, err)
			}
		}(srv)
	}
	var lastErr error
	for range bal.KeepServices {
		if err := <-errs; err != nil {
			bal.logf("%v", err)
			lastErr = err
		}
	}
	return lastErr
}
func (bal *Balancer) logf(f string, args ...interface{}) {
	if bal.Logger != nil {
		bal.Logger.Printf(f, args...)
	}
}
func (bal *Balancer) time(name, help string) func() {
	observer := bal.Metrics.DurationObserver(name+"_seconds", help)
	t0 := time.Now()
	bal.Logger.Printf("%s: start", name)
	return func() {
		dur := time.Since(t0)
		observer.Observe(dur.Seconds())
		bal.Logger.Printf("%s: took %vs", name, dur.Seconds())
	}
}
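// (Callers use the pattern "defer bal.time(...)()": bal.time itself
// runs immediately, starting the timer and logging "start", and only
// the func it returns is deferred, so the elapsed time is observed
// when the calling function returns.)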
// Rendezvous hash sort function. Less efficient than sorting on
// precomputed rendezvous hashes, but also rarely used.
func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
	a := md5.Sum([]byte(string(blkid[:32]) + i))
	b := md5.Sum([]byte(string(blkid[:32]) + j))
	return bytes.Compare(a[:], b[:]) < 0
}