14324: Use logrus in Azure driver. Fix Sirupsen->sirupsen in imports
[arvados.git] / services / keep-balance / balance.go
index c7fa88230307e3eb47d06fac094b4768f3c7e2c8..836be2e6080c3345758a38bbd0953ace6d7a11c0 100644
@@ -10,7 +10,6 @@ import (
        "fmt"
        "log"
        "math"
-       "os"
        "runtime"
        "sort"
        "strings"
@@ -19,20 +18,9 @@ import (
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/sirupsen/logrus"
 )
 
-// CheckConfig returns an error if anything is wrong with the given
-// config and runOptions.
-func CheckConfig(config Config, runOptions RunOptions) error {
-       if len(config.KeepServiceList.Items) > 0 && config.KeepServiceTypes != nil {
-               return fmt.Errorf("cannot specify both KeepServiceList and KeepServiceTypes in config")
-       }
-       if !runOptions.Once && config.RunPeriod == arvados.Duration(0) {
-               return fmt.Errorf("you must either use the -once flag, or specify RunPeriod in config")
-       }
-       return nil
-}
-
 // Balancer compares the contents of keepstore servers with the
 // collections stored in Arvados, and issues pull/trash requests
 // needed to get (closer to) the optimal data layout.
@@ -43,11 +31,13 @@ func CheckConfig(config Config, runOptions RunOptions) error {
 // BlobSignatureTTL; and all N existing replicas of a given data block
 // are in the N best positions in rendezvous probe order.
 type Balancer struct {
+       Logger  *logrus.Logger
+       Dumper  *logrus.Logger
+       Metrics *metrics
+
        *BlockStateMap
        KeepServices       map[string]*KeepService
        DefaultReplication int
-       Logger             *log.Logger
-       Dumper             *log.Logger
        MinMtime           int64
 
        classes       []string
@@ -72,13 +62,7 @@ type Balancer struct {
 func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions RunOptions, err error) {
        nextRunOptions = runOptions
 
-       bal.Dumper = runOptions.Dumper
-       bal.Logger = runOptions.Logger
-       if bal.Logger == nil {
-               bal.Logger = log.New(os.Stderr, "", log.LstdFlags)
-       }
-
-       defer timeMe(bal.Logger, "Run")()
+       defer bal.time("sweep", "wall clock time to run one full sweep")()
 
        if len(config.KeepServiceList.Items) > 0 {
                err = bal.SetKeepServices(config.KeepServiceList)
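
Note on the change above: Run no longer falls back to log.New(os.Stderr, ...), so bal.Logger, bal.Dumper, and bal.Metrics are expected to be set before Run is called. A minimal sketch of that wiring, assuming it lives in the same package as Balancer (the logrus settings are illustrative, not taken from this commit):

    import "github.com/sirupsen/logrus"

    func newBalancer(m *metrics) *Balancer {
        logger := logrus.New() // writes to stderr with the default text formatter
        dumper := logrus.New()
        dumper.Formatter = &logrus.TextFormatter{DisableTimestamp: true}
        return &Balancer{
            Logger:  logger,
            Dumper:  dumper,
            Metrics: m, // the metrics type itself is not part of this diff
        }
    }
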
@@ -95,7 +79,7 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions R
                        return
                }
        }
-       bal.dedupDevices()
+       bal.cleanupMounts()
 
        if err = bal.CheckSanityEarly(&config.Client); err != nil {
                return
@@ -170,7 +154,7 @@ func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) e
        })
 }
 
-func (bal *Balancer) dedupDevices() {
+func (bal *Balancer) cleanupMounts() {
        rwdev := map[string]*KeepService{}
        for _, srv := range bal.KeepServices {
                for _, mnt := range srv.mounts {
@@ -192,6 +176,14 @@ func (bal *Balancer) dedupDevices() {
                }
                srv.mounts = dedup
        }
+       for _, srv := range bal.KeepServices {
+               for _, mnt := range srv.mounts {
+                       if mnt.Replication <= 0 {
+                               log.Printf("%s: mount %s reports replication=%d, using replication=1", srv, mnt.UUID, mnt.Replication)
+                               mnt.Replication = 1
+                       }
+               }
+       }
 }
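
cleanupMounts (renamed from dedupDevices) now also normalizes mounts that report replication <= 0, treating them as 1x so they still count in the replication-weighted totals computed later. A standalone illustration of that rule (names below are hypothetical, not from the package):

    package main

    import "fmt"

    // normalizeReplication mirrors the floor applied in cleanupMounts:
    // an unreported or bogus replication value is treated as 1x.
    func normalizeReplication(reported int) int {
        if reported <= 0 {
            return 1
        }
        return reported
    }

    func main() {
        for _, r := range []int{-1, 0, 1, 3} {
            fmt.Printf("reported %d -> using %d\n", r, normalizeReplication(r))
        }
    }
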
 
 // CheckSanityEarly checks for configuration and runtime errors that
@@ -261,7 +253,7 @@ func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
 //
 // It encodes the resulting information in BlockStateMap.
 func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) error {
-       defer timeMe(bal.Logger, "GetCurrentState")()
+       defer bal.time("get_state", "wall clock time to get current state")()
        bal.BlockStateMap = NewBlockStateMap()
 
        dd, err := c.DiscoveryDocument()
@@ -271,7 +263,7 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
        bal.DefaultReplication = dd.DefaultCollectionReplication
        bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
 
-       errs := make(chan error, 2+len(bal.KeepServices))
+       errs := make(chan error, 1)
        wg := sync.WaitGroup{}
 
        // When a device is mounted more than once, we will get its
@@ -306,7 +298,10 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                        bal.logf("mount %s: retrieve index from %s", mounts[0], mounts[0].KeepService)
                        idx, err := mounts[0].KeepService.IndexMount(c, mounts[0].UUID, "")
                        if err != nil {
-                               errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err)
+                               select {
+                               case errs <- fmt.Errorf("%s: retrieve index: %v", mounts[0], err):
+                               default:
+                               }
                                return
                        }
                        if len(errs) > 0 {
@@ -316,9 +311,9 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                                return
                        }
                        for _, mount := range mounts {
-                               bal.logf("%s: add %d replicas to map", mount, len(idx))
+                               bal.logf("%s: add %d entries to map", mount, len(idx))
                                bal.BlockStateMap.AddReplicas(mount, idx)
-                               bal.logf("%s: added %d replicas", mount, len(idx))
+                               bal.logf("%s: added %d entries to map at %dx (%d replicas)", mount, len(idx), mount.Replication, len(idx)*mount.Replication)
                        }
                        bal.logf("mount %s: index done", mounts[0])
                }(mounts)
@@ -338,7 +333,10 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                for coll := range collQ {
                        err := bal.addCollection(coll)
                        if err != nil {
-                               errs <- err
+                               select {
+                               case errs <- err:
+                               default:
+                               }
                                for range collQ {
                                }
                                return
@@ -368,7 +366,10 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                        })
                close(collQ)
                if err != nil {
-                       errs <- err
+                       select {
+                       case errs <- err:
+                       default:
+                       }
                }
        }()
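
The error channel above shrinks from 2+len(bal.KeepServices) slots to a single slot, and each producer (index fetch, collection add, collection scan) now uses a non-blocking send, so only the first failure is recorded and later ones are dropped rather than blocking their goroutines. A standalone sketch of that pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        errs := make(chan error, 1) // room for exactly one error
        var wg sync.WaitGroup
        for i := 0; i < 5; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                select {
                case errs <- fmt.Errorf("worker %d failed", i):
                default: // an error is already queued; drop this one
                }
            }(i)
        }
        wg.Wait()
        close(errs)
        fmt.Println(<-errs) // the first error that was sent
    }
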
 
@@ -405,7 +406,7 @@ func (bal *Balancer) addCollection(coll arvados.Collection) error {
 func (bal *Balancer) ComputeChangeSets() {
        // This just calls balanceBlock() once for each block, using a
        // pool of worker goroutines.
-       defer timeMe(bal.Logger, "ComputeChangeSets")()
+       defer bal.time("changeset_compute", "wall clock time to compute changesets")()
        bal.setupLookupTables()
 
        type balanceTask struct {
@@ -528,7 +529,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                        slots = append(slots, slot{
                                mnt:  mnt,
                                repl: repl,
-                               want: repl != nil && (mnt.ReadOnly || repl.Mtime >= bal.MinMtime),
+                               want: repl != nil && mnt.ReadOnly,
                        })
                }
        }
@@ -554,7 +555,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                have := 0
                for _, slot := range slots {
                        if slot.repl != nil && bal.mountsByClass[class][slot.mnt] && !countedDev[slot.mnt.DeviceID] {
-                               have++
+                               have += slot.mnt.Replication
                                if slot.mnt.DeviceID != "" {
                                        countedDev[slot.mnt.DeviceID] = true
                                }
@@ -576,14 +577,14 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                                // Prefer a mount that satisfies the
                                // desired class.
                                return bal.mountsByClass[class][si.mnt]
-                       } else if wanti, wantj := si.want, si.want; wanti != wantj {
+                       } else if si.want != sj.want {
                                // Prefer a mount that will have a
                                // replica no matter what we do here
                                // -- either because it already has an
                                // untrashable replica, or because we
                                // already need it to satisfy a
                                // different storage class.
-                               return slots[i].want
+                               return si.want
                        } else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {
                                // Prefer a better rendezvous
                                // position.
@@ -614,30 +615,36 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                // trashing replicas that aren't optimal positions for
                // any storage class.
                protMnt := map[*KeepMount]bool{}
+               // Replication planned so far (corresponds to wantMnt).
+               replWant := 0
+               // Protected replication (corresponds to protMnt).
+               replProt := 0
 
                // trySlot tries using a slot to meet requirements,
                // and returns true if all requirements are met.
                trySlot := func(i int) bool {
                        slot := slots[i]
-                       if wantDev[slot.mnt.DeviceID] {
+                       if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
                                // Already allocated a replica to this
                                // backend device, possibly on a
                                // different server.
                                return false
                        }
-                       if len(protMnt) < desired && slot.repl != nil {
+                       if replProt < desired && slot.repl != nil && !protMnt[slot.mnt] {
                                unsafeToDelete[slot.repl.Mtime] = true
                                protMnt[slot.mnt] = true
+                               replProt += slot.mnt.Replication
                        }
-                       if len(wantMnt) < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+                       if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
                                slots[i].want = true
                                wantSrv[slot.mnt.KeepService] = true
                                wantMnt[slot.mnt] = true
                                if slot.mnt.DeviceID != "" {
                                        wantDev[slot.mnt.DeviceID] = true
                                }
+                               replWant += slot.mnt.Replication
                        }
-                       return len(protMnt) >= desired && len(wantMnt) >= desired
+                       return replProt >= desired && replWant >= desired
                }
 
                // First try to achieve desired replication without
@@ -664,7 +671,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                                if slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {
                                        continue
                                }
-                               if safe++; safe >= desired {
+                               if safe += slot.mnt.Replication; safe >= desired {
                                        break
                                }
                        }
@@ -699,14 +706,17 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
        countedDev := map[string]bool{}
        var have, want int
        for _, slot := range slots {
+               if countedDev[slot.mnt.DeviceID] {
+                       continue
+               }
                if slot.want {
-                       want++
+                       want += slot.mnt.Replication
                }
-               if slot.repl != nil && !countedDev[slot.mnt.DeviceID] {
-                       have++
-                       if slot.mnt.DeviceID != "" {
-                               countedDev[slot.mnt.DeviceID] = true
-                       }
+               if slot.repl != nil {
+                       have += slot.mnt.Replication
+               }
+               if slot.mnt.DeviceID != "" {
+                       countedDev[slot.mnt.DeviceID] = true
                }
        }
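
Throughout balanceBlock, counting switches from "number of slots" to "sum of mnt.Replication", so a replica on a backend that itself stores N copies counts as N, and the recomputed have/want above skips any device that was already counted. A simplified standalone version of that loop (mount and slot are stand-ins for the package's types; repl != nil is modeled as a bool):

    package main

    import "fmt"

    type mount struct {
        DeviceID    string
        Replication int
    }

    type slot struct {
        mnt     *mount
        haveRep bool // stand-in for repl != nil
        want    bool
    }

    func main() {
        devA := &mount{DeviceID: "dev-a", Replication: 3}
        devB := &mount{DeviceID: "dev-b", Replication: 1}
        slots := []slot{
            {mnt: devA, haveRep: true, want: true},  // e.g. cloud backend at 3x
            {mnt: devB, haveRep: true, want: false}, // plain disk at 1x
            {mnt: devA, haveRep: true, want: true},  // same device mounted on two servers
        }
        counted := map[string]bool{}
        have, want := 0, 0
        for _, s := range slots {
            if counted[s.mnt.DeviceID] {
                continue // each backend device counts once
            }
            if s.want {
                want += s.mnt.Replication
            }
            if s.haveRep {
                have += s.mnt.Replication
            }
            if s.mnt.DeviceID != "" {
                counted[s.mnt.DeviceID] = true
            }
        }
        fmt.Println("have", have, "want", want) // have 4 want 3
    }
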
 
@@ -715,7 +725,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                // TODO: request a Touch if Mtime is duplicated.
                var change int
                switch {
-               case !underreplicated && slot.repl != nil && !slot.want && !unsafeToDelete[slot.repl.Mtime]:
+               case !underreplicated && !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime && !unsafeToDelete[slot.repl.Mtime]:
                        slot.mnt.KeepService.AddTrash(Trash{
                                SizedDigest: blkid,
                                Mtime:       slot.repl.Mtime,
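
The Mtime test moves from the initial want computation (earlier in this diff) into the trash decision itself: a surplus replica is only trashed if it is older than bal.MinMtime, which GetCurrentState sets to now minus BlobSignatureTTL, so recently written data that may still be covered by valid blob signatures stays put. A standalone illustration of the cutoff (values are made up):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        blobSignatureTTL := int64(14 * 24 * 3600) // seconds; e.g. two weeks
        minMtime := time.Now().UnixNano() - blobSignatureTTL*1e9

        replMtime := time.Now().Add(-time.Hour).UnixNano()        // replica written an hour ago
        fmt.Println("old enough to trash:", replMtime < minMtime) // false: still inside the TTL
    }
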
@@ -779,6 +789,26 @@ type balancerStats struct {
        trashes       int
        replHistogram []int
        classStats    map[string]replicationStats
+
+       // collectionBytes / collectionBlockBytes = deduplication ratio
+       collectionBytes      int64 // sum(bytes in referenced blocks) across all collections
+       collectionBlockBytes int64 // sum(block size) across all blocks referenced by collections
+       collectionBlockRefs  int64 // sum(number of blocks referenced) across all collections
+       collectionBlocks     int64 // number of blocks referenced by any collection
+}
+
+func (s *balancerStats) dedupByteRatio() float64 {
+       if s.collectionBlockBytes == 0 {
+               return 0
+       }
+       return float64(s.collectionBytes) / float64(s.collectionBlockBytes)
+}
+
+func (s *balancerStats) dedupBlockRatio() float64 {
+       if s.collectionBlocks == 0 {
+               return 0
+       }
+       return float64(s.collectionBlockRefs) / float64(s.collectionBlocks)
 }
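
The two ratios above are fed by new per-block counters accumulated in collectStatistics (next hunk), keyed on each block's RefCount. A standalone worked example (values are made up):

    package main

    import "fmt"

    func main() {
        // One 64 MiB block referenced by three different collections.
        blockSize := int64(64 << 20)
        refCount := int64(3)

        collectionBytes := refCount * blockSize // bytes as seen by collections
        collectionBlockBytes := blockSize       // bytes of unique blocks actually referenced
        collectionBlockRefs := refCount
        collectionBlocks := int64(1)

        fmt.Println("dedup byte ratio: ",
            float64(collectionBytes)/float64(collectionBlockBytes)) // 3
        fmt.Println("dedup block ratio:",
            float64(collectionBlockRefs)/float64(collectionBlocks)) // 3
    }
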
 
 type replicationStats struct {
@@ -802,6 +832,13 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
                surplus := result.have - result.want
                bytes := result.blkid.Size()
 
+               if rc := int64(result.blk.RefCount); rc > 0 {
+                       s.collectionBytes += rc * bytes
+                       s.collectionBlockBytes += bytes
+                       s.collectionBlockRefs += rc
+                       s.collectionBlocks++
+               }
+
                for class, state := range result.classState {
                        cs := s.classStats[class]
                        if state.unachievable {
@@ -848,7 +885,7 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
                case surplus > 0:
                        s.overrep.replicas += surplus
                        s.overrep.blocks++
-                       s.overrep.bytes += bytes * int64(len(result.blk.Replicas)-result.want)
+                       s.overrep.bytes += bytes * int64(result.have-result.want)
                default:
                        s.justright.replicas += result.want
                        s.justright.blocks++
@@ -860,22 +897,23 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
                        s.desired.blocks++
                        s.desired.bytes += bytes * int64(result.want)
                }
-               if len(result.blk.Replicas) > 0 {
-                       s.current.replicas += len(result.blk.Replicas)
+               if result.have > 0 {
+                       s.current.replicas += result.have
                        s.current.blocks++
-                       s.current.bytes += bytes * int64(len(result.blk.Replicas))
+                       s.current.bytes += bytes * int64(result.have)
                }
 
-               for len(s.replHistogram) <= len(result.blk.Replicas) {
+               for len(s.replHistogram) <= result.have {
                        s.replHistogram = append(s.replHistogram, 0)
                }
-               s.replHistogram[len(result.blk.Replicas)]++
+               s.replHistogram[result.have]++
        }
        for _, srv := range bal.KeepServices {
                s.pulls += len(srv.ChangeSet.Pulls)
                s.trashes += len(srv.ChangeSet.Trashes)
        }
        bal.stats = s
+       bal.Metrics.UpdateStats(s)
 }
 
 // PrintStatistics writes statistics about the computed changes to
@@ -969,6 +1007,7 @@ func (bal *Balancer) CheckSanityLate() error {
 // existing blocks that are either underreplicated or poorly
 // distributed according to rendezvous hashing.
 func (bal *Balancer) CommitPulls(c *arvados.Client) error {
+       defer bal.time("send_pull_lists", "wall clock time to send pull lists")()
        return bal.commitAsync(c, "send pull list",
                func(srv *KeepService) error {
                        return srv.CommitPulls(c)
@@ -979,6 +1018,7 @@ func (bal *Balancer) CommitPulls(c *arvados.Client) error {
 // keepstore servers. This has the effect of deleting blocks that are
 // overreplicated or unreferenced.
 func (bal *Balancer) CommitTrash(c *arvados.Client) error {
+       defer bal.time("send_trash_lists", "wall clock time to send trash lists")()
        return bal.commitAsync(c, "send trash list",
                func(srv *KeepService) error {
                        return srv.CommitTrash(c)
@@ -992,7 +1032,6 @@ func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *Ke
                        var err error
                        defer func() { errs <- err }()
                        label := fmt.Sprintf("%s: %v", srv, label)
-                       defer timeMe(bal.Logger, label)()
                        err = f(srv)
                        if err != nil {
                                err = fmt.Errorf("%s: %v", label, err)
@@ -1016,6 +1055,17 @@ func (bal *Balancer) logf(f string, args ...interface{}) {
        }
 }
 
+func (bal *Balancer) time(name, help string) func() {
+       observer := bal.Metrics.DurationObserver(name+"_seconds", help)
+       t0 := time.Now()
+       bal.Logger.Printf("%s: start", name)
+       return func() {
+               dur := time.Since(t0)
+               observer.Observe(dur.Seconds())
+               bal.Logger.Printf("%s: took %vs", name, dur.Seconds())
+       }
+}
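
The timing that used to go through timeMe() now flows through bal.Metrics.DurationObserver, whose implementation is not included in this diff. A minimal sketch of what such an observer might look like on top of prometheus/client_golang (all names below are hypothetical; only the DurationObserver signature is taken from the usage above):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    type metrics struct {
        reg *prometheus.Registry
    }

    // DurationObserver returns something with an Observe(float64) method,
    // matching how bal.time() uses it above.
    func (m *metrics) DurationObserver(name, help string) prometheus.Summary {
        obs := prometheus.NewSummary(prometheus.SummaryOpts{
            Name: name,
            Help: help,
        })
        m.reg.MustRegister(obs)
        return obs
    }

    func main() {
        m := &metrics{reg: prometheus.NewRegistry()}
        observer := m.DurationObserver("sweep_seconds", "wall clock time to run one full sweep")
        t0 := time.Now()
        // ... one full sweep would run here ...
        observer.Observe(time.Since(t0).Seconds())
    }
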
+
 // Rendezvous hash sort function. Less efficient than sorting on
 // precomputed rendezvous hashes, but also rarely used.
 func rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {