// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"context"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"runtime"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
	"github.com/sirupsen/logrus"
)

// Balancer compares the contents of keepstore servers with the
// collections stored in Arvados, and issues pull/trash requests
// needed to get (closer to) the optimal data layout.
//
// In the optimal data layout: every data block referenced by a
// collection is replicated at least as many times as desired by the
// collection; there are no unreferenced data blocks older than
// BlobSignatureTTL; and all N existing replicas of a given data block
// are in the N best positions in rendezvous probe order.
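//
// For example (illustrative): if a block's rendezvous probe order is
// [srv3, srv1, srv2] and the block's desired replication is 2, the
// optimal layout keeps replicas on srv3 and srv1; a replica on srv2
// is surplus and will eventually be trashed.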
type Balancer struct {
	Logger  logrus.FieldLogger
	Dumper  logrus.FieldLogger
	Metrics *metrics

	LostBlocksFile string

	*BlockStateMap
	KeepServices       map[string]*KeepService
	DefaultReplication int
	MinMtime           int64

	classes       []string
	mounts        int
	mountsByClass map[string]map[*KeepMount]bool
	collScanned   int
	serviceRoots  map[string]string
	errors        []error
	stats         balancerStats
	lostBlocks    io.Writer
}

// Run performs a balance operation using the given config and
// runOptions, and returns RunOptions suitable for passing to a
// subsequent balance operation.
//
// Run should only be called once on a given Balancer object.
//
// Typical usage:
//
//	runOptions, err = (&Balancer{}).Run(client, cluster, runOptions)
func (bal *Balancer) Run(client *arvados.Client, cluster *arvados.Cluster, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
	nextRunOptions = runOptions
	defer bal.time("sweep", "wall clock time to run one full sweep")()

	var lbFile *os.File
	if bal.LostBlocksFile != "" {
		tmpfn := bal.LostBlocksFile + ".tmp"
		lbFile, err = os.OpenFile(tmpfn, os.O_CREATE|os.O_WRONLY, 0777)
		if err != nil {
			return
		}
		defer lbFile.Close()
		err = syscall.Flock(int(lbFile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
		if err != nil {
			return
		}
		defer func() {
			// Remove the tempfile only if we didn't get
			// as far as successfully renaming it.
			if lbFile != nil {
				os.Remove(tmpfn)
			}
		}()
		bal.lostBlocks = lbFile
	} else {
		bal.lostBlocks = ioutil.Discard
	}

	err = bal.DiscoverKeepServices(client)
	if err != nil {
		return
	}

	for _, srv := range bal.KeepServices {
		err = srv.discoverMounts(client)
		if err != nil {
			return
		}
	}
	bal.cleanupMounts()

	if err = bal.CheckSanityEarly(client); err != nil {
		return
	}

	rs := bal.rendezvousState()
	if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
		if runOptions.SafeRendezvousState != "" {
			bal.logf("notice: KeepServices list has changed since last run")
		}
		bal.logf("clearing existing trash lists, in case the new rendezvous order differs from previous run")
		if err = bal.ClearTrashLists(client); err != nil {
			return
		}
		// The current rendezvous state becomes "safe" (i.e.,
		// OK to compute changes for that state without
		// clearing existing trash lists) only now, after we
		// succeed in clearing existing trash lists.
		nextRunOptions.SafeRendezvousState = rs
	}

	// Indexing and sending trash/pull lists can take a long time
	// on a big site. Prefer a long timeout (causing slow recovery
	// from undetected network problems) to a short timeout
	// (causing starvation via perpetual timeout/restart cycle).
	client.Timeout = 24 * time.Hour

	if err = bal.GetCurrentState(client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
		return
	}
	bal.ComputeChangeSets()
	bal.PrintStatistics()
	if err = bal.CheckSanityLate(); err != nil {
		return
	}
	if lbFile != nil {
		err = lbFile.Sync()
		if err != nil {
			return
		}
		err = os.Rename(bal.LostBlocksFile+".tmp", bal.LostBlocksFile)
		if err != nil {
			return
		}
		lbFile = nil
	}
	if runOptions.CommitPulls {
		err = bal.CommitPulls(client)
		if err != nil {
			// Skip trash if we can't pull. (Too cautious?)
			return
		}
	}
	if runOptions.CommitTrash {
		err = bal.CommitTrash(client)
	}
	return
}

// SetKeepServices sets the list of KeepServices to operate on.
func (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {
	bal.KeepServices = make(map[string]*KeepService)
	for _, srv := range srvList.Items {
		bal.KeepServices[srv.UUID] = &KeepService{
			KeepService: srv,
			ChangeSet:   &ChangeSet{},
		}
	}
	return nil
}

// DiscoverKeepServices sets the list of KeepServices by calling the
// API to get a list of all services, and selecting the ones whose
// ServiceType is "disk".
func (bal *Balancer) DiscoverKeepServices(c *arvados.Client) error {
	bal.KeepServices = make(map[string]*KeepService)
	return c.EachKeepService(func(srv arvados.KeepService) error {
		if srv.ServiceType == "disk" {
			bal.KeepServices[srv.UUID] = &KeepService{
				KeepService: srv,
				ChangeSet:   &ChangeSet{},
			}
		} else {
			bal.logf("skipping %v with service type %q", srv.UUID, srv.ServiceType)
		}
		return nil
	})
}

func (bal *Balancer) cleanupMounts() {
	rwdev := map[string]*KeepService{}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			if !mnt.ReadOnly && mnt.DeviceID != "" {
				rwdev[mnt.DeviceID] = srv
			}
		}
	}
	// Drop the readonly mounts whose device is mounted RW
	// elsewhere.
	for _, srv := range bal.KeepServices {
		var dedup []*KeepMount
		for _, mnt := range srv.mounts {
			if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
				bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
			} else {
				dedup = append(dedup, mnt)
			}
		}
		srv.mounts = dedup
	}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			if mnt.Replication <= 0 {
				bal.logf("%s: mount %s reports replication=%d, using replication=1", srv, mnt.UUID, mnt.Replication)
				mnt.Replication = 1
			}
		}
	}
}

// CheckSanityEarly checks for configuration and runtime errors that
// can be detected before GetCurrentState() and ComputeChangeSets()
// are called.
//
// If it returns an error, it is pointless to run GetCurrentState or
// ComputeChangeSets: after doing so, the statistics would be
// meaningless and it would be dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
	u, err := c.CurrentUser()
	if err != nil {
		return fmt.Errorf("CurrentUser(): %v", err)
	}
	if !u.IsActive || !u.IsAdmin {
		return fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
	}
	for _, srv := range bal.KeepServices {
		if srv.ServiceType == "proxy" {
			return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
		}
	}

	// Collections with a null modified_at cannot be paged over
	// reliably, so refuse to proceed if any exist.
	var checkPage arvados.CollectionList
	if err = c.RequestAndDecode(&checkPage, "GET", "arvados/v1/collections", nil, arvados.ResourceListParams{
		Limit:              new(int),
		Count:              "exact",
		IncludeTrash:       true,
		IncludeOldVersions: true,
		Filters: []arvados.Filter{{
			Attr:     "modified_at",
			Operator: "=",
			Operand:  nil,
		}},
	}); err != nil {
		return err
	} else if n := checkPage.ItemsAvailable; n > 0 {
		return fmt.Errorf("%d collections exist with null modified_at; cannot fetch reliably", n)
	}

	return nil
}

// rendezvousState returns a fingerprint (e.g., a sorted list of
// UUID+host+port) of the current set of keep services.
func (bal *Balancer) rendezvousState() string {
	srvs := make([]string, 0, len(bal.KeepServices))
	for _, srv := range bal.KeepServices {
		srvs = append(srvs, srv.String())
	}
	sort.Strings(srvs)
	return strings.Join(srvs, "; ")
}

// ClearTrashLists sends an empty trash list to each keep
// service. Calling this before GetCurrentState avoids races.
//
// When a block appears in an index, we assume that replica will still
// exist after we delete other replicas on other servers. However,
// it's possible that a previous rebalancing operation made different
// decisions (e.g., servers were added/removed, and rendezvous order
// changed). In this case, the replica might already be on that
// server's trash list, and it might be deleted before we send a
// replacement trash list.
//
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
	for _, srv := range bal.KeepServices {
		srv.ChangeSet = &ChangeSet{}
	}
	return bal.CommitTrash(c)
}

// GetCurrentState determines the current replication state, and the
// desired replication level, for every block that is either
// retrievable or referenced.
//
// It determines the current replication state by reading the block index
// from every known Keep service.
//
// It determines the desired replication level by retrieving all
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	defer bal.time("get_state", "wall clock time to get current state")()
	bal.BlockStateMap = NewBlockStateMap()

	dd, err := c.DiscoveryDocument()
	if err != nil {
		return err
	}
	bal.DefaultReplication = dd.DefaultCollectionReplication
	bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
	errs := make(chan error, 1)
	wg := sync.WaitGroup{}

	// When a device is mounted more than once, we will get its
	// index only once, and call AddReplicas on all of the mounts.
	// equivMount keys are the mounts that will be indexed, and
	// each value is a list of mounts to apply the received index
	// to.
	equivMount := map[*KeepMount][]*KeepMount{}
	// deviceMount maps each device ID to the one mount that will
	// be indexed for that device.
	deviceMount := map[string]*KeepMount{}
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			equiv := deviceMount[mnt.DeviceID]
			if equiv == nil {
				equiv = mnt
				if mnt.DeviceID != "" {
					deviceMount[mnt.DeviceID] = equiv
				}
			}
			equivMount[equiv] = append(equivMount[equiv], mnt)
		}
	}

	// Start one goroutine for each (non-redundant) mount:
	// retrieve the index, and add the returned blocks to
	// BlockStateMap.
	for _, mounts := range equivMount {
		wg.Add(1)
		go func(mounts []*KeepMount) {
			defer wg.Done()
			bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
			idx, err := mounts[0].KeepService.IndexMount(ctx, c, mounts[0].UUID, "")
			if err != nil {
				select {
				case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
				default:
				}
				cancel()
				return
			}
			if len(errs) > 0 {
				// Some other goroutine encountered an
				// error -- any further effort here
				// would be wasted.
				return
			}
			for _, mount := range mounts {
				bal.logf("%s: add %d entries to map", mount, len(idx))
				bal.BlockStateMap.AddReplicas(mount, idx)
				bal.logf("%s: added %d entries to map at %dx (%d replicas)", mount, len(idx), mount.Replication, len(idx)*mount.Replication)
			}
			bal.logf("mount %s: index done", mounts[0])
		}(mounts)
	}

	// collQ buffers incoming collections so we can start fetching
	// the next page without waiting for the current page to
	// finish processing.
	collQ := make(chan arvados.Collection, bufs)

	// Start a goroutine to process collections. (We could use a
	// worker pool here, but even with a single worker we already
	// process collections much faster than we can retrieve them.)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for coll := range collQ {
			err := bal.addCollection(coll)
			if err != nil || len(errs) > 0 {
				select {
				case errs <- err:
				default:
				}
				for range collQ {
				}
				cancel()
				return
			}
			bal.collScanned++
		}
	}()

	// Start a goroutine to retrieve all collections from the
	// Arvados database and send them to collQ for processing.
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := EachCollection(c, pageSize,
			func(coll arvados.Collection) error {
				collQ <- coll
				if len(errs) > 0 {
					// some other GetCurrentState
					// error happened: no point
					// getting any more
					// collections.
					return fmt.Errorf("")
				}
				return nil
			}, func(done, total int) {
				bal.logf("collections: %d/%d", done, total)
			})
		close(collQ)
		if err != nil {
			select {
			case errs <- err:
			default:
			}
			cancel()
		}
	}()

	wg.Wait()
	if len(errs) > 0 {
		return <-errs
	}
	return nil
}

func (bal *Balancer) addCollection(coll arvados.Collection) error {
	blkids, err := coll.SizedDigests()
	if err != nil {
		return fmt.Errorf("%v: %v", coll.UUID, err)
	}
	repl := bal.DefaultReplication
	if coll.ReplicationDesired != nil {
		repl = *coll.ReplicationDesired
	}
	bal.Logger.Debugf("%v: %d blocks x%d", coll.UUID, len(blkids), repl)
	// Pass pdh to IncreaseDesired only if LostBlocksFile is being
	// written -- otherwise it's just a waste of memory.
	pdh := ""
	if bal.LostBlocksFile != "" {
		pdh = coll.PortableDataHash
	}
	bal.BlockStateMap.IncreaseDesired(pdh, coll.StorageClassesDesired, repl, blkids)
	return nil
}

// ComputeChangeSets compares, for each known block, the current and
// desired replication states. If it is possible to get closer to the
// desired state by copying or deleting blocks, it adds those changes
// to the relevant KeepServices' ChangeSets.
//
// It does not actually apply any of the computed changes.
func (bal *Balancer) ComputeChangeSets() {
	// This just calls balanceBlock() once for each block, using a
	// pool of worker goroutines.
	defer bal.time("changeset_compute", "wall clock time to compute changesets")()
	bal.setupLookupTables()

	type balanceTask struct {
		blkid arvados.SizedDigest
		blk   *BlockState
	}
	workers := runtime.GOMAXPROCS(-1)
	todo := make(chan balanceTask, workers)
	go func() {
		bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
			todo <- balanceTask{
				blkid: blkid,
				blk:   blk,
			}
		})
		close(todo)
	}()
	results := make(chan balanceResult, workers)
	go func() {
		var wg sync.WaitGroup
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for work := range todo {
					results <- bal.balanceBlock(work.blkid, work.blk)
				}
			}()
		}
		wg.Wait()
		close(results)
	}()
	bal.collectStatistics(results)
}

func (bal *Balancer) setupLookupTables() {
	bal.serviceRoots = make(map[string]string)
	bal.classes = defaultClasses
	bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
	bal.mounts = 0
	for _, srv := range bal.KeepServices {
		bal.serviceRoots[srv.UUID] = srv.UUID
		for _, mnt := range srv.mounts {
			bal.mounts++

			// All mounts on a read-only service are
			// effectively read-only.
			mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly

			if len(mnt.StorageClasses) == 0 {
				bal.mountsByClass["default"][mnt] = true
				continue
			}
			for class := range mnt.StorageClasses {
				if mbc := bal.mountsByClass[class]; mbc == nil {
					bal.classes = append(bal.classes, class)
					bal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}
				} else {
					mbc[mnt] = true
				}
			}
		}
	}
	// Consider classes in lexicographic order to avoid flapping
	// between balancing runs. The outcome of the "prefer a mount
	// we're already planning to use for a different storage
	// class" case in balanceBlock depends on the order classes
	// are considered.
	sort.Strings(bal.classes)
}

const (
	changeStay = iota
	changePull
	changeTrash
	changeNone
)

var changeName = map[int]string{
	changeStay:  "stay",
	changePull:  "pull",
	changeTrash: "trash",
	changeNone:  "none",
}

type balancedBlockState struct {
	needed       int
	unneeded     int
	pulling      int
	unachievable bool
}

type balanceResult struct {
	blk        *BlockState
	blkid      arvados.SizedDigest
	lost       bool
	blockState balancedBlockState
	classState map[string]balancedBlockState
}

type slot struct {
	mnt  *KeepMount // never nil
	repl *Replica   // replica already stored here (or nil)
	want bool       // we should pull/leave a replica here
}

// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {
	bal.Logger.Debugf("balanceBlock: %v %+v", blkid, blk)

	// Build a list of all slots (one per mounted volume).
	slots := make([]slot, 0, bal.mounts)
	for _, srv := range bal.KeepServices {
		for _, mnt := range srv.mounts {
			var repl *Replica
			for r := range blk.Replicas {
				if blk.Replicas[r].KeepMount == mnt {
					repl = &blk.Replicas[r]
				}
			}
			// Initial value of "want" is "have, and can't
			// delete". These untrashable replicas get
			// prioritized when sorting slots: otherwise,
			// non-optimal readonly copies would cause us
			// to overreplicate.
			slots = append(slots, slot{
				mnt:  mnt,
				repl: repl,
				want: repl != nil && mnt.ReadOnly,
			})
		}
	}

	uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
	srvRendezvous := make(map[*KeepService]int, len(uuids))
	for i, uuid := range uuids {
		srv := bal.KeepServices[uuid]
		srvRendezvous[srv] = i
	}

	// Below we set underreplicated=true if we find any storage
	// class that's currently underreplicated -- in that case we
	// won't want to trash any replicas.
	underreplicated := false

	unsafeToDelete := make(map[int64]bool, len(slots))
	for _, class := range bal.classes {
		desired := blk.Desired[class]
		if desired == 0 {
			continue
		}

		// Sort the slots by desirability.
		sort.Slice(slots, func(i, j int) bool {
			si, sj := slots[i], slots[j]
			if classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {
				// Prefer a mount that satisfies the
				// desired class.
				return bal.mountsByClass[class][si.mnt]
			} else if si.want != sj.want {
				// Prefer a mount that will have a
				// replica no matter what we do here
				// -- either because it already has an
				// untrashable replica, or because we
				// already need it to satisfy a
				// different storage class.
				return si.want
			} else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
				// Prefer a better rendezvous
				// position.
				return orderi < orderj
			} else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {
				// Prefer a mount that already has a
				// replica.
				return repli
			} else {
				// If pull/trash turns out to be
				// needed, distribute the
				// new/remaining replicas uniformly
				// across qualifying mounts on a given
				// server.
				return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
			}
		})

		// Servers/mounts/devices (with or without existing
		// replicas) that are part of the best achievable
		// layout for this storage class.
		wantSrv := map[*KeepService]bool{}
		wantMnt := map[*KeepMount]bool{}
		wantDev := map[string]bool{}
		// Positions (with existing replicas) that have been
		// protected (via unsafeToDelete) to ensure we don't
		// reduce replication below desired level when
		// trashing replicas that aren't optimal positions for
		// any storage class.
		protMnt := map[*KeepMount]bool{}
		// Replication planned so far (corresponds to wantMnt).
		replWant := 0
		// Protected replication (corresponds to protMnt).
		replProt := 0

		// trySlot tries using a slot to meet requirements,
		// and returns true if all requirements are met.
		trySlot := func(i int) bool {
			slot := slots[i]
			if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
				// Already allocated a replica to this
				// backend device, possibly on a
				// different server.
				return false
			}
			if replProt < desired && slot.repl != nil && !protMnt[slot.mnt] {
				unsafeToDelete[slot.repl.Mtime] = true
				protMnt[slot.mnt] = true
				replProt += slot.mnt.Replication
			}
			if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
				slots[i].want = true
				wantSrv[slot.mnt.KeepService] = true
				wantMnt[slot.mnt] = true
				if slot.mnt.DeviceID != "" {
					wantDev[slot.mnt.DeviceID] = true
				}
				replWant += slot.mnt.Replication
			}
			return replProt >= desired && replWant >= desired
		}

		// First try to achieve desired replication without
		// using the same server twice.
		done := false
		for i := 0; i < len(slots) && !done; i++ {
			if !wantSrv[slots[i].mnt.KeepService] {
				done = trySlot(i)
			}
		}

		// If that didn't suffice, do another pass without the
		// "distinct services" restriction. (Achieving the
		// desired volume replication on fewer than the
		// desired number of services is better than
		// underreplicating.)
		for i := 0; i < len(slots) && !done; i++ {
			done = trySlot(i)
		}

		if !underreplicated {
			safe := 0
			for _, slot := range slots {
				if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
					continue
				}
				if safe += slot.mnt.Replication; safe >= desired {
					break
				}
			}
			underreplicated = safe < desired
		}

		// Avoid deleting wanted replicas from devices that
		// are mounted on multiple servers -- even if they
		// haven't already been added to unsafeToDelete
		// because the servers report different Mtimes.
		for _, slot := range slots {
			if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
				unsafeToDelete[slot.repl.Mtime] = true
			}
		}
	}

	// TODO: If multiple replicas are trashable, prefer the oldest
	// replica that doesn't have a timestamp collision with
	// others.

	for i, slot := range slots {
		// Don't trash (1) any replicas of an underreplicated
		// block, even if they're in the wrong positions, or
		// (2) any replicas whose Mtimes are identical to
		// needed replicas (in case we're really seeing the
		// same copy via different mounts).
		if slot.repl != nil && (underreplicated || unsafeToDelete[slot.repl.Mtime]) {
			slots[i].want = true
		}
	}

	classState := make(map[string]balancedBlockState, len(bal.classes))
	for _, class := range bal.classes {
		classState[class] = computeBlockState(slots, bal.mountsByClass[class], len(blk.Replicas), blk.Desired[class])
	}
	blockState := computeBlockState(slots, nil, len(blk.Replicas), 0)

	var lost bool
	var changes []string
	for _, slot := range slots {
		// TODO: request a Touch if Mtime is duplicated.
		var change int
		switch {
		case !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime:
			slot.mnt.KeepService.AddTrash(Trash{
				SizedDigest: blkid,
				Mtime:       slot.repl.Mtime,
				From:        slot.mnt,
			})
			change = changeTrash
		case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
			lost = true
			change = changeNone
		case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
			slot.mnt.KeepService.AddPull(Pull{
				SizedDigest: blkid,
				From:        blk.Replicas[0].KeepMount.KeepService,
				To:          slot.mnt,
			})
			change = changePull
		case slot.repl != nil:
			change = changeStay
		default:
			change = changeNone
		}
		if bal.Dumper != nil {
			var mtime int64
			if slot.repl != nil {
				mtime = slot.repl.Mtime
			}
			srv := slot.mnt.KeepService
			changes = append(changes, fmt.Sprintf("%s:%d/%s=%s,%d", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))
		}
	}
	if bal.Dumper != nil {
		bal.Dumper.Printf("%s refs=%d needed=%d unneeded=%d pulling=%v %v %v", blkid, blk.RefCount, blockState.needed, blockState.unneeded, blockState.pulling, blk.Desired, changes)
	}
	return balanceResult{
		blk:        blk,
		blkid:      blkid,
		lost:       lost,
		blockState: blockState,
		classState: classState,
	}
}

func computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRepl int) (bbs balancedBlockState) {
	repl := 0
	countedDev := map[string]bool{}
	for _, slot := range slots {
		if onlyCount != nil && !onlyCount[slot.mnt] {
			continue
		}
		if countedDev[slot.mnt.DeviceID] {
			continue
		}
		switch {
		case slot.repl != nil && slot.want:
			bbs.needed++
			repl += slot.mnt.Replication
		case slot.repl != nil && !slot.want:
			bbs.unneeded++
			repl += slot.mnt.Replication
		case slot.repl == nil && slot.want && have > 0:
			bbs.pulling++
			repl += slot.mnt.Replication
		}
		if slot.mnt.DeviceID != "" {
			countedDev[slot.mnt.DeviceID] = true
		}
	}
	if repl < needRepl {
		bbs.unachievable = true
	}
	return
}

type blocksNBytes struct {
	replicas int
	blocks   int
	bytes    int64
}

func (bb blocksNBytes) String() string {
	return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}
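// For example (illustrative values), blocksNBytes{replicas: 3,
// blocks: 2, bytes: 128}.String() yields
// "3 replicas (2 blocks, 128 bytes)".
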
type replicationStats struct {
	needed       blocksNBytes
	unneeded     blocksNBytes
	pulling      blocksNBytes
	unachievable blocksNBytes
}

type balancerStats struct {
	lost          blocksNBytes
	overrep       blocksNBytes
	unref         blocksNBytes
	garbage       blocksNBytes
	underrep      blocksNBytes
	unachievable  blocksNBytes
	justright     blocksNBytes
	desired       blocksNBytes
	current       blocksNBytes
	pulls         int
	trashes       int
	replHistogram []int
	classStats    map[string]replicationStats

	// collectionBytes / collectionBlockBytes = deduplication ratio
	collectionBytes      int64 // sum(bytes in referenced blocks) across all collections
	collectionBlockBytes int64 // sum(block size) across all blocks referenced by collections
	collectionBlockRefs  int64 // sum(number of blocks referenced) across all collections
	collectionBlocks     int64 // number of blocks referenced by any collection
}

func (s *balancerStats) dedupByteRatio() float64 {
	if s.collectionBlockBytes == 0 {
		return 0
	}
	return float64(s.collectionBytes) / float64(s.collectionBlockBytes)
}

func (s *balancerStats) dedupBlockRatio() float64 {
	if s.collectionBlocks == 0 {
		return 0
	}
	return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
}
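// For example (illustrative): if two collections each reference the
// same 64 MiB block, then collectionBytes=128 MiB,
// collectionBlockBytes=64 MiB, collectionBlockRefs=2, and
// collectionBlocks=1, so both ratios report 2.0 -- deduplication is
// halving the storage the collections would otherwise require.
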
func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
	var s balancerStats
	s.replHistogram = make([]int, 2)
	s.classStats = make(map[string]replicationStats, len(bal.classes))
	for result := range results {
		bytes := result.blkid.Size()

		if rc := int64(result.blk.RefCount); rc > 0 {
			s.collectionBytes += rc * bytes
			s.collectionBlockBytes += bytes
			s.collectionBlockRefs += rc
			s.collectionBlocks++
		}

		for class, state := range result.classState {
			cs := s.classStats[class]
			if state.unachievable {
				cs.unachievable.replicas++
				cs.unachievable.blocks++
				cs.unachievable.bytes += bytes
			}
			if state.needed > 0 {
				cs.needed.replicas += state.needed
				cs.needed.blocks++
				cs.needed.bytes += bytes * int64(state.needed)
			}
			if state.unneeded > 0 {
				cs.unneeded.replicas += state.unneeded
				cs.unneeded.blocks++
				cs.unneeded.bytes += bytes * int64(state.unneeded)
			}
			if state.pulling > 0 {
				cs.pulling.replicas += state.pulling
				cs.pulling.blocks++
				cs.pulling.bytes += bytes * int64(state.pulling)
			}
			s.classStats[class] = cs
		}

		bs := result.blockState
		switch {
		case result.lost:
			s.lost.replicas++
			s.lost.blocks++
			s.lost.bytes += bytes
			fmt.Fprintf(bal.lostBlocks, "%s", strings.SplitN(string(result.blkid), "+", 2)[0])
			for pdh := range result.blk.Refs {
				fmt.Fprintf(bal.lostBlocks, " %s", pdh)
			}
			fmt.Fprint(bal.lostBlocks, "\n")
		case bs.pulling > 0:
			s.underrep.replicas += bs.pulling
			s.underrep.blocks++
			s.underrep.bytes += bytes * int64(bs.pulling)
		case bs.unachievable:
			s.underrep.replicas++
			s.underrep.blocks++
			s.underrep.bytes += bytes
		case bs.unneeded > 0 && bs.needed == 0:
			// Count as "garbage" if all replicas are old
			// enough to trash, otherwise count as
			// "unref".
			counter := &s.garbage
			for _, r := range result.blk.Replicas {
				if r.Mtime >= bal.MinMtime {
					counter = &s.unref
					break
				}
			}
			counter.replicas += bs.unneeded
			counter.blocks++
			counter.bytes += bytes * int64(bs.unneeded)
		case bs.unneeded > 0:
			s.overrep.replicas += bs.unneeded
			s.overrep.blocks++
			s.overrep.bytes += bytes * int64(bs.unneeded)
		default:
			s.justright.replicas += bs.needed
			s.justright.blocks++
			s.justright.bytes += bytes * int64(bs.needed)
		}

		if bs.needed > 0 {
			s.desired.replicas += bs.needed
			s.desired.blocks++
			s.desired.bytes += bytes * int64(bs.needed)
		}
		if bs.needed+bs.unneeded > 0 {
			s.current.replicas += bs.needed + bs.unneeded
			s.current.blocks++
			s.current.bytes += bytes * int64(bs.needed+bs.unneeded)
		}

		for len(s.replHistogram) <= bs.needed+bs.unneeded {
			s.replHistogram = append(s.replHistogram, 0)
		}
		s.replHistogram[bs.needed+bs.unneeded]++
	}
	for _, srv := range bal.KeepServices {
		s.pulls += len(srv.ChangeSet.Pulls)
		s.trashes += len(srv.ChangeSet.Trashes)
	}
	bal.stats = s
	bal.Metrics.UpdateStats(s)
}

// PrintStatistics writes statistics about the computed changes to
// bal.Logger. It should not be called until ComputeChangeSets has
// finished.
func (bal *Balancer) PrintStatistics() {
	bal.logf("===")
	bal.logf("%s lost (0=have<want)", bal.stats.lost)
	bal.logf("%s underreplicated (0<have<want)", bal.stats.underrep)
	bal.logf("%s just right (have=want)", bal.stats.justright)
	bal.logf("%s overreplicated (have>want>0)", bal.stats.overrep)
	bal.logf("%s unreferenced (have>want=0, new)", bal.stats.unref)
	bal.logf("%s garbage (have>want=0, old)", bal.stats.garbage)
	for _, class := range bal.classes {
		cs := bal.stats.classStats[class]
		bal.logf("===")
		bal.logf("storage class %q: %s needed", class, cs.needed)
		bal.logf("storage class %q: %s unneeded", class, cs.unneeded)
		bal.logf("storage class %q: %s pulling", class, cs.pulling)
		bal.logf("storage class %q: %s unachievable", class, cs.unachievable)
	}
	bal.logf("===")
	bal.logf("%s total commitment (excluding unreferenced)", bal.stats.desired)
	bal.logf("%s total usage", bal.stats.current)
	bal.logf("===")
	for _, srv := range bal.KeepServices {
		bal.logf("%s: %v\n", srv, srv.ChangeSet)
	}
	bal.logf("===")
	bal.printHistogram(60)
	bal.logf("===")
}

func (bal *Balancer) printHistogram(hashColumns int) {
	bal.logf("Replication level distribution:")
	maxCount := 0
	for _, count := range bal.stats.replHistogram {
		if maxCount < count {
			maxCount = count
		}
	}
	hashes := strings.Repeat("#", hashColumns)
	countWidth := 1 + int(math.Log10(float64(maxCount+1)))
	scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
	for repl, count := range bal.stats.replHistogram {
		nHashes := int(scaleCount * math.Log10(float64(count+1)))
		bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
	}
}
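// Example output (illustrative counts only), showing how the hash
// bars scale logarithmically with the per-level block counts:
//
//	Replication level distribution:
//	 0:     0
//	 1:   123 ######
//	 2: 96689 ############################################################
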
// CheckSanityLate checks for configuration and runtime errors after
// GetCurrentState() and ComputeChangeSets() have finished.
//
// If it returns an error, it is dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityLate() error {
	if bal.errors != nil {
		for _, err := range bal.errors {
			bal.logf("deferred error: %v", err)
		}
		return fmt.Errorf("cannot proceed safely after deferred errors")
	}

	if bal.collScanned == 0 {
		return fmt.Errorf("received zero collections")
	}

	anyDesired := false
	bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
		for _, desired := range blk.Desired {
			if desired > 0 {
				anyDesired = true
				break
			}
		}
	})
	if !anyDesired {
		return fmt.Errorf("zero blocks have desired replication>0")
	}

	if dr := bal.DefaultReplication; dr < 1 {
		return fmt.Errorf("Default replication (%d) is less than 1", dr)
	}

	// TODO: no two services have identical indexes
	// TODO: no collisions (same md5, different size)

	return nil
}

// CommitPulls sends the computed lists of pull requests to the
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
func (bal *Balancer) CommitPulls(c *arvados.Client) error {
	defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
	return bal.commitAsync(c, "send pull list",
		func(srv *KeepService) error {
			return srv.CommitPulls(c)
		})
}

// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
func (bal *Balancer) CommitTrash(c *arvados.Client) error {
	defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
	return bal.commitAsync(c, "send trash list",
		func(srv *KeepService) error {
			return srv.CommitTrash(c)
		})
}

func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {
	errs := make(chan error)
	for _, srv := range bal.KeepServices {
		go func(srv *KeepService) {
			var err error
			defer func() { errs <- err }()
			label := fmt.Sprintf("%s: %v", srv, label)
			err = f(srv)
			if err != nil {
				err = fmt.Errorf("%s: %v", label, err)
			}
		}(srv)
	}
	var lastErr error
	for range bal.KeepServices {
		if err := <-errs; err != nil {
			bal.logf("%v", err)
			lastErr = err
		}
	}
	return lastErr
}

func (bal *Balancer) logf(f string, args ...interface{}) {
	if bal.Logger != nil {
		bal.Logger.Printf(f, args...)
	}
}

func (bal *Balancer) time(name, help string) func() {
	observer := bal.Metrics.DurationObserver(name+"_seconds", help)
	t0 := time.Now()
	bal.Logger.Printf("%s: start", name)
	return func() {
		dur := time.Since(t0)
		observer.Observe(dur.Seconds())
		bal.Logger.Printf("%s: took %vs", name, dur.Seconds())
	}
}

// Rendezvous hash sort function. Less efficient than sorting on
// precomputed rendezvous hashes, but also rarely used.
func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {
	a := md5.Sum([]byte(string(blkid[:32]) + i))
	b := md5.Sum([]byte(string(blkid[:32]) + j))
	return bytes.Compare(a[:], b[:]) < 0
}
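
// Illustrative sketch (not part of the balancer's own logic): the
// full rendezvous order for one block can be recovered by sorting a
// candidate set with rendezvousLess -- each candidate is hashed
// together with the block ID, and candidates are ranked by digest.
// The names below are hypothetical:
//
//	ids := []string{"dev-a", "dev-b", "dev-c"} // hypothetical device IDs
//	sort.Slice(ids, func(x, y int) bool {
//		return rendezvousLess(ids[x], ids[y], blkid)
//	})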