13 "git.curoverse.com/arvados.git/sdk/go/arvados"
14 "git.curoverse.com/arvados.git/sdk/go/keepclient"
// CheckConfig returns an error if anything is wrong with the given
// config and runOptions.
func CheckConfig(config Config, runOptions RunOptions) error {
	if len(config.KeepServiceList.Items) > 0 && config.KeepServiceTypes != nil {
		return fmt.Errorf("cannot specify both KeepServiceList and KeepServiceTypes in config")
	}
	if !runOptions.Once && config.RunPeriod == arvados.Duration(0) {
		return fmt.Errorf("you must either use the -once flag, or specify RunPeriod in config")
	}
	return nil
}

// Balancer compares the contents of keepstore servers with the
// collections stored in Arvados, and issues pull/trash requests
// needed to get (closer to) the optimal data layout.
//
// In the optimal data layout: every data block referenced by a
// collection is replicated at least as many times as desired by the
// collection; there are no unreferenced data blocks older than
// BlobSignatureTTL; and all N existing replicas of a given data block
// are in the N best positions in rendezvous probe order.
type Balancer struct {
	*BlockStateMap
	KeepServices       map[string]*KeepService
	DefaultReplication int
	Logger             *log.Logger
	Dumper             *log.Logger
	MinMtime           int64

	collScanned  int
	serviceRoots map[string]string
	errors       []error
	mutex        sync.Mutex
}

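// To illustrate "rendezvous probe order": each block ID induces its
// own stable ranking of the keep services, computed by hashing the
// block ID against each service's UUID, so no central placement
// table is needed. A minimal sketch using the same keepclient API
// that balanceBlock uses below (the UUIDs are hypothetical):
//
//	roots := map[string]string{
//		"zzzzz-bi6l4-000000000000000": "zzzzz-bi6l4-000000000000000",
//		"zzzzz-bi6l4-000000000000001": "zzzzz-bi6l4-000000000000001",
//	}
//	blkid := "fedcba9876543210fedcba9876543210" // first 32 hex digits of a locator
//	order := keepclient.NewRootSorter(roots, blkid).GetSortedRoots()
//	// order[0] is the best place for the block's first replica,
//	// order[1] for its second, and so on.
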
// Run performs a balance operation using the given config and
// runOptions. It should only be called once on a given Balancer
// object. Typical usage:
//
//	err = (&Balancer{}).Run(config, runOptions)
func (bal *Balancer) Run(config Config, runOptions RunOptions) (err error) {
	bal.Dumper = runOptions.Dumper
	bal.Logger = runOptions.Logger
	if bal.Logger == nil {
		bal.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}
	defer timeMe(bal.Logger, "Run")()
	if len(config.KeepServiceList.Items) > 0 {
		err = bal.SetKeepServices(config.KeepServiceList)
	} else {
		err = bal.DiscoverKeepServices(&config.Client, config.KeepServiceTypes)
	}
	if err != nil {
		return
	}
	if err = bal.CheckSanityEarly(&config.Client); err != nil {
		return
	}
	if runOptions.CommitTrash {
		if err = bal.ClearTrashLists(&config.Client); err != nil {
			return
		}
	}
	if err = bal.GetCurrentState(&config.Client); err != nil {
		return
	}
	bal.ComputeChangeSets()
	bal.PrintStatistics()
	if err = bal.CheckSanityLate(); err != nil {
		return
	}
	if runOptions.CommitPulls {
		err = bal.CommitPulls(&config.Client)
		if err != nil {
			// Skip trash if we can't pull. (Too cautious?)
			return
		}
	}
	if runOptions.CommitTrash {
		err = bal.CommitTrash(&config.Client)
	}
	return
}

// SetKeepServices sets the list of KeepServices to operate on.
func (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {
	bal.KeepServices = make(map[string]*KeepService)
	for _, srv := range srvList.Items {
		bal.KeepServices[srv.UUID] = &KeepService{
			KeepService: srv,
			ChangeSet:   &ChangeSet{},
		}
	}
	return nil
}

// DiscoverKeepServices sets the list of KeepServices by calling the
// API to get a list of all services, and selecting the ones whose
// ServiceType is in okTypes.
func (bal *Balancer) DiscoverKeepServices(c *arvados.Client, okTypes []string) error {
	bal.KeepServices = make(map[string]*KeepService)
	ok := make(map[string]bool)
	for _, t := range okTypes {
		ok[t] = true
	}
	return c.EachKeepService(func(srv arvados.KeepService) error {
		if ok[srv.ServiceType] {
			bal.KeepServices[srv.UUID] = &KeepService{
				KeepService: srv,
				ChangeSet:   &ChangeSet{},
			}
		} else {
			bal.logf("skipping %v with service type %q", srv.UUID, srv.ServiceType)
		}
		return nil
	})
}

// CheckSanityEarly checks for configuration and runtime errors that
// can be detected before GetCurrentState() and ComputeChangeSets()
// are called.
//
// If it returns an error, it is pointless to run GetCurrentState or
// ComputeChangeSets: after doing so, the statistics would be
// meaningless and it would be dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
	u, err := c.CurrentUser()
	if err != nil {
		return fmt.Errorf("CurrentUser(): %v", err)
	}
	if !u.IsActive || !u.IsAdmin {
		return fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
	}
	for _, srv := range bal.KeepServices {
		if srv.ServiceType == "proxy" {
			return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
		}
	}
	return nil
}

// ClearTrashLists sends an empty trash list to each keep
// service. Calling this before GetCurrentState avoids races.
//
// When a block appears in an index, we assume that replica will still
// exist after we delete other replicas on other servers. However,
// it's possible that a previous rebalancing operation made different
// decisions (e.g., servers were added/removed, and rendezvous order
// changed). In this case, the replica might already be on that
// server's trash list, and it might be deleted before we send a
// replacement trash list.
//
// We avoid this problem if we clear all trash lists before getting
// indexes. (We also assume there is only one rebalancing process
// running at a time.)
func (bal *Balancer) ClearTrashLists(c *arvados.Client) error {
	for _, srv := range bal.KeepServices {
		srv.ChangeSet = &ChangeSet{}
	}
	return bal.CommitTrash(c)
}

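// The race, concretely: (1) an earlier run put replica R on server
// S's trash list; (2) this run reads S's index and counts R; (3) S
// deletes R while acting on the stale trash list; (4) this run,
// still believing R exists, trashes the block's other replicas.
// Clearing every trash list before reading any index makes step (3)
// impossible.
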
// GetCurrentState determines the current replication state, and the
// desired replication level, for every block that is either
// retrievable or referenced.
//
// It determines the current replication state by reading the block index
// from every known Keep service.
//
// It determines the desired replication level by retrieving all
// collection manifests in the database (API server).
//
// It encodes the resulting information in BlockStateMap.
func (bal *Balancer) GetCurrentState(c *arvados.Client) error {
	defer timeMe(bal.Logger, "GetCurrentState")()
	bal.BlockStateMap = NewBlockStateMap()

	dd, err := c.DiscoveryDocument()
	if err != nil {
		return err
	}
	bal.DefaultReplication = dd.DefaultCollectionReplication
	bal.MinMtime = time.Now().Unix() - dd.BlobSignatureTTL

	errs := make(chan error, 2+len(bal.KeepServices))
	wg := sync.WaitGroup{}

	// Start one goroutine for each KeepService: retrieve the
	// index, and add the returned blocks to BlockStateMap.
	for _, srv := range bal.KeepServices {
		wg.Add(1)
		go func(srv *KeepService) {
			defer wg.Done()
			bal.logf("%s: retrieve index", srv)
			idx, err := srv.Index(c, "")
			if err != nil {
				errs <- fmt.Errorf("%s: %v", srv, err)
				return
			}
			bal.logf("%s: add %d replicas to map", srv, len(idx))
			bal.BlockStateMap.AddReplicas(srv, idx)
			bal.logf("%s: done", srv)
		}(srv)
	}

	// collQ buffers incoming collections so we can start fetching
	// the next page without waiting for the current page to
	// finish processing. (1000 happens to match the page size
	// used by (*arvados.Client)EachCollection(), but it's OK if
	// they don't match.)
	collQ := make(chan arvados.Collection, 1000)

	// Start a goroutine to process collections. (We could use a
	// worker pool here, but even with a single worker we already
	// process collections much faster than we can retrieve them.)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for coll := range collQ {
			err := bal.addCollection(coll)
			if err != nil {
				errs <- err
				// Drain collQ so the sender can finish.
				for range collQ {
				}
				return
			}
			bal.collScanned++
		}
	}()

	// Start a goroutine to retrieve all collections from the
	// Arvados database and send them to collQ for processing.
	wg.Add(1)
	go func() {
		defer wg.Done()
		err = EachCollection(c,
			func(coll arvados.Collection) error {
				collQ <- coll
				if len(errs) > 0 {
					// some other GetCurrentState
					// error happened: no point
					// getting any more collections.
					return fmt.Errorf("")
				}
				return nil
			}, func(done, total int) {
				bal.logf("collections: %d/%d", done, total)
			})
		close(collQ)
		if err != nil {
			errs <- err
		}
	}()

	go func() {
		// Send a nil error when all goroutines finish. If
		// this is the first error sent to errs, then
		// everything worked.
		wg.Wait()
		errs <- nil
	}()
	return <-errs
}

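// The error-reporting pattern used by GetCurrentState, reduced to
// its essentials: each goroutine sends at most one error to a
// buffered channel, a watcher goroutine sends nil after wg.Wait(),
// and the caller returns whichever value arrives first. A minimal
// self-contained sketch (doWork is hypothetical, not part of this
// package):
//
//	errs := make(chan error, nWorkers+1)
//	var wg sync.WaitGroup
//	for i := 0; i < nWorkers; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			if err := doWork(i); err != nil {
//				errs <- err
//			}
//		}(i)
//	}
//	go func() { wg.Wait(); errs <- nil }()
//	return <-errs
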
func (bal *Balancer) addCollection(coll arvados.Collection) error {
	blkids, err := coll.SizedDigests()
	if err != nil {
		bal.mutex.Lock()
		bal.errors = append(bal.errors, fmt.Errorf("%v: %v", coll.UUID, err))
		bal.mutex.Unlock()
		return nil
	}
	repl := bal.DefaultReplication
	if coll.ReplicationDesired != nil {
		repl = *coll.ReplicationDesired
	}
	debugf("%v: %d blocks x%d", coll.UUID, len(blkids), repl)
	bal.BlockStateMap.IncreaseDesired(repl, blkids)
	return nil
}

// ComputeChangeSets compares, for each known block, the current and
// desired replication states. If it is possible to get closer to the
// desired state by copying or deleting blocks, it adds those changes
// to the relevant KeepServices' ChangeSets.
//
// It does not actually apply any of the computed changes.
func (bal *Balancer) ComputeChangeSets() {
	// This just calls balanceBlock() once for each block, using a
	// pool of worker goroutines.
	defer timeMe(bal.Logger, "ComputeChangeSets")()
	bal.setupServiceRoots()

	type balanceTask struct {
		blkid arvados.SizedDigest
		blk   *BlockState
	}
	nWorkers := 1 + runtime.NumCPU()
	todo := make(chan balanceTask, nWorkers)
	var wg sync.WaitGroup
	for i := 0; i < nWorkers; i++ {
		wg.Add(1)
		go func() {
			for work := range todo {
				bal.balanceBlock(work.blkid, work.blk)
			}
			wg.Done()
		}()
	}
	bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
		todo <- balanceTask{
			blkid: blkid,
			blk:   blk,
		}
	})
	close(todo)
	wg.Wait()
}

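// Design note on the worker pool above: todo is buffered to nWorkers
// so the Apply callback can stay slightly ahead of the workers
// without unbounded memory growth, and close(todo) followed by
// wg.Wait() guarantees every block has been balanced before
// ComputeChangeSets returns.
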
func (bal *Balancer) setupServiceRoots() {
	bal.serviceRoots = make(map[string]string)
	for _, srv := range bal.KeepServices {
		bal.serviceRoots[srv.UUID] = srv.UUID
	}
}

const (
	changeStay = iota
	changePull
	changeTrash
	changeNone
)

var changeName = map[int]string{
	changeStay:  "stay",
	changePull:  "pull",
	changeTrash: "trash",
	changeNone:  "none",
}

// balanceBlock compares current state to desired state for a single
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
	debugf("balanceBlock: %v %+v", blkid, blk)
	uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
	hasRepl := make(map[string]Replica, len(bal.serviceRoots))
	for _, repl := range blk.Replicas {
		hasRepl[repl.UUID] = repl
		// TODO: when multiple copies are on one server, use
		// the oldest one that doesn't have a timestamp
		// collision with other replicas.
	}
	// number of replicas already found in positions better than
	// the position we're contemplating now.
	reportedBestRepl := 0
	// To be safe we assume two replicas with the same Mtime are
	// in fact the same replica being reported more than
	// once. len(uniqueBestRepl) is the number of distinct
	// replicas in the best rendezvous positions we've considered
	// so far.
	uniqueBestRepl := make(map[int64]bool, len(bal.serviceRoots))
	// pulls is the number of Pull changes we have already
	// requested. (For purposes of deciding whether to Pull to
	// rendezvous position N, we should assume all pulls we have
	// requested on rendezvous positions M<N will be successful.)
	pulls := 0
	var changes []string
	for _, uuid := range uuids {
		change := changeNone
		srv := bal.KeepServices[uuid]
		// TODO: request a Touch if Mtime is duplicated.
		repl, ok := hasRepl[srv.UUID]
		if ok {
			// This service has a replica. We should
			// delete it if [1] we already have enough
			// distinct replicas in better rendezvous
			// positions and [2] this replica's Mtime is
			// distinct from all of the better replicas'
			// Mtimes.
			if !srv.ReadOnly &&
				repl.Mtime < bal.MinMtime &&
				len(uniqueBestRepl) >= blk.Desired &&
				!uniqueBestRepl[repl.Mtime] {
				srv.AddTrash(Trash{
					SizedDigest: blkid,
					Mtime:       repl.Mtime,
				})
				change = changeTrash
			} else {
				change = changeStay
			}
			uniqueBestRepl[repl.Mtime] = true
			reportedBestRepl++
		} else if pulls+reportedBestRepl < blk.Desired &&
			len(blk.Replicas) > 0 &&
			!srv.ReadOnly {
			// This service doesn't have a replica. We
			// should pull one to this server if we don't
			// already have enough (existing+requested)
			// replicas in better rendezvous positions.
			srv.AddPull(Pull{
				SizedDigest: blkid,
				Source:      blk.Replicas[0].KeepService,
			})
			pulls++
			change = changePull
		}
		if bal.Dumper != nil {
			changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], repl.Mtime))
		}
	}
	if bal.Dumper != nil {
		bal.Dumper.Printf("%s have=%d want=%d %s", blkid, len(blk.Replicas), blk.Desired, strings.Join(changes, " "))
	}
}

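// A worked example of the walk above (hypothetical numbers): suppose
// blk.Desired=2 and the block has replicas, with distinct Mtimes
// older than MinMtime, at rendezvous positions 0 and 3. Position 0:
// stay (len(uniqueBestRepl) is 0, not >= 2). Position 1: no replica,
// and pulls+reportedBestRepl = 0+1 < 2, so pull. Position 2: 1+1 < 2
// is false, so no change. Position 3: stay, because len(uniqueBestRepl)
// is still 1. Only after the pull to position 1 succeeds will a
// later run see len(uniqueBestRepl) reach 2 by position 3 and trash
// that replica.
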
type blocksNBytes struct {
	replicas int
	blocks   int
	bytes    int64
}

func (bb blocksNBytes) String() string {
	return fmt.Sprintf("%d replicas (%d blocks, %d bytes)", bb.replicas, bb.blocks, bb.bytes)
}

type balancerStats struct {
	lost, overrep, unref, garbage, underrep, justright blocksNBytes
	desired, current                                   blocksNBytes
	pulls, trashes                                     int
	replHistogram                                      []int
}

func (bal *Balancer) getStatistics() (s balancerStats) {
	s.replHistogram = make([]int, 2)
	bal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {
		surplus := len(blk.Replicas) - blk.Desired
		bytes := blkid.Size()
		switch {
		case len(blk.Replicas) == 0 && blk.Desired > 0:
			s.lost.replicas -= surplus
			s.lost.blocks++
			s.lost.bytes += bytes * int64(-surplus)
		case len(blk.Replicas) < blk.Desired:
			s.underrep.replicas -= surplus
			s.underrep.blocks++
			s.underrep.bytes += bytes * int64(-surplus)
		case len(blk.Replicas) > 0 && blk.Desired == 0:
			counter := &s.garbage
			for _, r := range blk.Replicas {
				if r.Mtime >= bal.MinMtime {
					counter = &s.unref
					break
				}
			}
			counter.replicas += surplus
			counter.blocks++
			counter.bytes += bytes * int64(surplus)
		case len(blk.Replicas) > blk.Desired:
			s.overrep.replicas += surplus
			s.overrep.blocks++
			s.overrep.bytes += bytes * int64(len(blk.Replicas)-blk.Desired)
		default:
			s.justright.replicas += blk.Desired
			s.justright.blocks++
			s.justright.bytes += bytes * int64(blk.Desired)
		}

		if blk.Desired > 0 {
			s.desired.replicas += blk.Desired
			s.desired.blocks++
			s.desired.bytes += bytes * int64(blk.Desired)
		}
		if len(blk.Replicas) > 0 {
			s.current.replicas += len(blk.Replicas)
			s.current.blocks++
			s.current.bytes += bytes * int64(len(blk.Replicas))
		}

		for len(s.replHistogram) <= len(blk.Replicas) {
			s.replHistogram = append(s.replHistogram, 0)
		}
		s.replHistogram[len(blk.Replicas)]++
	})
	for _, srv := range bal.KeepServices {
		s.pulls += len(srv.ChangeSet.Pulls)
		s.trashes += len(srv.ChangeSet.Trashes)
	}
	return
}

// PrintStatistics writes statistics about the computed changes to
// bal.Logger. It should not be called until ComputeChangeSets has
// finished.
func (bal *Balancer) PrintStatistics() {
	s := bal.getStatistics()
	bal.logf("===")
	bal.logf("%s lost (0=have<want)", s.lost)
	bal.logf("%s underreplicated (0<have<want)", s.underrep)
	bal.logf("%s just right (have=want)", s.justright)
	bal.logf("%s overreplicated (have>want>0)", s.overrep)
	bal.logf("%s unreferenced (have>want=0, new)", s.unref)
	bal.logf("%s garbage (have>want=0, old)", s.garbage)
	bal.logf("===")
	bal.logf("%s total commitment (excluding unreferenced)", s.desired)
	bal.logf("%s total usage", s.current)
	bal.logf("===")
	for _, srv := range bal.KeepServices {
		bal.logf("%s: %v\n", srv, srv.ChangeSet)
	}
	bal.logf("===")
	bal.printHistogram(s, 60)
	bal.logf("===")
}

func (bal *Balancer) printHistogram(s balancerStats, hashColumns int) {
	bal.logf("Replication level distribution (counting N replicas on a single server as N):")
	maxCount := 0
	for _, count := range s.replHistogram {
		if maxCount < count {
			maxCount = count
		}
	}
	hashes := strings.Repeat("#", hashColumns)
	countWidth := 1 + int(math.Log10(float64(maxCount+1)))
	scaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))
	for repl, count := range s.replHistogram {
		nHashes := int(scaleCount * math.Log10(float64(count+1)))
		bal.logf("%2d: %*d %s", repl, countWidth, count, hashes[:nHashes])
	}
}

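// The scaling arithmetic, worked through with hypothetical numbers:
// with hashColumns=60 and maxCount=1000, countWidth = 1+int(log10(1001))
// = 4 digits, and scaleCount = 600/floor(1+10*log10(1001)) = 600/31
// ≈ 19.35, so a level with count=1000 draws int(19.35*3.0004) = 58
// hashes and a level with count=9 draws int(19.35*1) = 19. The log
// scale keeps rare replication levels visible next to common ones.
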
// CheckSanityLate checks for configuration and runtime errors after
// GetCurrentState() and ComputeChangeSets() have finished.
//
// If it returns an error, it is dangerous to run any Commit methods.
func (bal *Balancer) CheckSanityLate() error {
	if bal.errors != nil {
		for _, err := range bal.errors {
			bal.logf("deferred error: %v", err)
		}
		return fmt.Errorf("cannot proceed safely after deferred errors")
	}

	if bal.collScanned == 0 {
		return fmt.Errorf("received zero collections")
	}

	anyDesired := false
	bal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {
		if blk.Desired > 0 {
			anyDesired = true
		}
	})
	if !anyDesired {
		return fmt.Errorf("zero blocks have desired replication>0")
	}

	if dr := bal.DefaultReplication; dr < 1 {
		return fmt.Errorf("Default replication (%d) is less than 1", dr)
	}

	// TODO: no two services have identical indexes
	// TODO: no collisions (same md5, different size)
	return nil
}

// CommitPulls sends the computed lists of pull requests to the
// keepstore servers. This has the effect of increasing replication of
// existing blocks that are either underreplicated or poorly
// distributed according to rendezvous hashing.
func (bal *Balancer) CommitPulls(c *arvados.Client) error {
	return bal.commitAsync(c, "send pull list",
		func(srv *KeepService) error {
			return srv.CommitPulls(c)
		})
}

// CommitTrash sends the computed lists of trash requests to the
// keepstore servers. This has the effect of deleting blocks that are
// overreplicated or unreferenced.
func (bal *Balancer) CommitTrash(c *arvados.Client) error {
	return bal.commitAsync(c, "send trash list",
		func(srv *KeepService) error {
			return srv.CommitTrash(c)
		})
}

func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {
	errs := make(chan error)
	for _, srv := range bal.KeepServices {
		go func(srv *KeepService) {
			var err error
			defer func() { errs <- err }()
			label := fmt.Sprintf("%s: %v", srv, label)
			defer timeMe(bal.Logger, label)()
			err = f(srv)
			if err != nil {
				err = fmt.Errorf("%s: %v", label, err)
			}
		}(srv)
	}
	var lastErr error
	for range bal.KeepServices {
		if err := <-errs; err != nil {
			bal.logf("%v", err)
			lastErr = err
		}
	}
	return lastErr
}

func (bal *Balancer) logf(f string, args ...interface{}) {
	if bal.Logger != nil {
		bal.Logger.Printf(f, args...)
	}
}
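
// Putting it together (a minimal sketch, assuming a Config populated
// elsewhere; the RunOptions fields shown are the ones this file
// reads):
//
//	runOptions := RunOptions{Once: true, CommitPulls: false, CommitTrash: false}
//	if err := CheckConfig(config, runOptions); err != nil {
//		log.Fatal(err)
//	}
//	if err := (&Balancer{}).Run(config, runOptions); err != nil {
//		log.Fatal(err)
//	}
//
// With CommitPulls and CommitTrash false, this is a dry run: change
// sets are computed and statistics logged, but nothing is sent to
// the keepstore servers.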