+
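+// updateCollections computes confirmed replication (and the
+// corresponding confirmed storage classes) for each collection, and
+// writes the results back to the collections table.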
+func (bal *Balancer) updateCollections(ctx context.Context, c *arvados.Client, cluster *arvados.Cluster) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ defer bal.time("update_collections", "wall clock time to update collections")()
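+ // Record the scan start time. Collections modified after this
+ // point are skipped (see below), and the threshold is stored as
+ // the *_confirmed_at timestamp for every row we update.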
+ threshold := time.Now()
+ thresholdStr := threshold.Format(time.RFC3339Nano)
+
+ var err error
+ collQ := make(chan arvados.Collection, cluster.Collections.BalanceCollectionBuffers)
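+ // Producer: stream eligible collections into collQ, which is
+ // buffered (BalanceCollectionBuffers) so the table scan can run
+ // ahead of the update workers. err is written only by this
+ // goroutine; the close(collQ)/wg.Wait() sequence orders that
+ // write before the read at the end of this function.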
+ go func() {
+ defer close(collQ)
+ err = EachCollection(ctx, bal.DB, c, func(coll arvados.Collection) error {
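+ // A collection modified after the threshold may contain
+ // blocks this run never examined, so its confirmed
+ // replication cannot be trusted. Assuming EachCollection
+ // yields rows in modified_at order, returning io.EOF here
+ // ends the scan instead of skipping rows one by one.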
+ if coll.ModifiedAt.After(threshold) {
+ return io.EOF
+ }
+ if coll.IsTrashed {
+ return nil
+ }
+ collQ <- coll
+ return nil
+ }, func(done, total int) {
+ bal.logf("update collections: %d/%d", done, total)
+ })
+ if err == io.EOF {
+ err = nil
+ } else if err != nil {
+ bal.logf("error updating collections: %s", err)
+ }
+ }()
+
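+ // Workers: one goroutine per CPU drains collQ, computes each
+ // collection's confirmed replication per desired storage class,
+ // and writes it back with a single UPDATE.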
+ var updated int64
+ var wg sync.WaitGroup
+ for i := 0; i < runtime.NumCPU(); i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for coll := range collQ {
+ blkids, err := coll.SizedDigests()
+ if err != nil {
+ bal.logf("%s: %s", coll.UUID, err)
+ continue
+ }
+ repl := bal.BlockStateMap.GetConfirmedReplication(blkids, coll.StorageClassesDesired)
+ classes, err := json.Marshal(coll.StorageClassesDesired)
+ if err != nil {
+ bal.logf("BUG? json.Marshal(%v) failed: %s", classes, err)
+ continue
+ }
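+ // Use the scan threshold ($2), not the current time, for
+ // both *_confirmed_at columns: the confirmed values reflect
+ // cluster state as of the start of the scan.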
+ _, err = bal.DB.ExecContext(ctx, `update collections set
+ replication_confirmed=$1,
+ replication_confirmed_at=$2,
+ storage_classes_confirmed=$3,
+ storage_classes_confirmed_at=$2
+ where uuid=$4`,
+ repl, thresholdStr, classes, coll.UUID)
+ if err != nil {
+ bal.logf("%s: update failed: %s", coll.UUID, err)
+ continue
+ }
+ atomic.AddInt64(&updated, 1)
+ }
+ }()
+ }
+ wg.Wait()
+ bal.logf("updated %d collections", updated)
+ return err
+}