Merge branch '9848-copy-container-output' refs #9848
diff --git a/services/datamanager/datamanager.go b/services/datamanager/datamanager.go
index ce8114ade9bb469091f4cc6204a69a2df85661ac..5250d175ffa9995779c47d2fbaefedc992f09096 100644
@@ -3,8 +3,11 @@
 package main
 
 import (
+       "errors"
        "flag"
+       "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "git.curoverse.com/arvados.git/sdk/go/logger"
        "git.curoverse.com/arvados.git/sdk/go/util"
        "git.curoverse.com/arvados.git/services/datamanager/collection"
@@ -19,6 +22,8 @@ var (
        logEventTypePrefix  string
        logFrequencySeconds int
        minutesBetweenRuns  int
+       collectionBatchSize int
+       dryRun              bool
 )
 
 func init() {
@@ -33,40 +38,60 @@ func init() {
        flag.IntVar(&minutesBetweenRuns,
                "minutes-between-runs",
                0,
-               "How many minutes we wait betwen data manager runs. 0 means run once and exit.")
+               "How many minutes we wait between data manager runs. 0 means run once and exit.")
+       flag.IntVar(&collectionBatchSize,
+               "collection-batch-size",
+               1000,
+               "How many collections to request in each batch.")
+       flag.BoolVar(&dryRun,
+               "dry-run",
+               false,
+               "Perform a dry run. Log how many blocks would be deleted/moved, but do not issue any changes to keepstore.")
 }
 
 func main() {
        flag.Parse()
+
        if minutesBetweenRuns == 0 {
-               singlerun()
+               arv, err := arvadosclient.MakeArvadosClient()
+               if err != nil {
+                       loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+               }
+               err = singlerun(arv)
+               if err != nil {
+                       loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("singlerun: %v", err))
+               }
        } else {
                waitTime := time.Minute * time.Duration(minutesBetweenRuns)
                for {
                        log.Println("Beginning Run")
-                       singlerun()
+                       arv, err := arvadosclient.MakeArvadosClient()
+                       if err != nil {
+                               loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+                       }
+                       err = singlerun(arv)
+                       if err != nil {
+                               log.Printf("singlerun: %v", err)
+                       }
                        log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
                        time.Sleep(waitTime)
                }
        }
 }
 
-func singlerun() {
-       arv, err := arvadosclient.MakeArvadosClient()
-       if err != nil {
-               log.Fatalf("Error setting up arvados client %s", err.Error())
-       }
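+// arvLogger is the shared event logger. singlerun leaves it nil unless a
+// log event type prefix has been configured.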
+var arvLogger *logger.Logger
 
-       if is_admin, err := util.UserIsAdmin(arv); err != nil {
-               log.Fatalf("Error querying current arvados user %s", err.Error())
-       } else if !is_admin {
-               log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.")
+func singlerun(arv *arvadosclient.ArvadosClient) error {
+       var err error
+       if isAdmin, err := util.UserIsAdmin(arv); err != nil {
+               return errors.New("Error verifying admin token: " + err.Error())
+       } else if !isAdmin {
+               return errors.New("Current user is not an admin. Datamanager requires a privileged token.")
        }
 
-       var arvLogger *logger.Logger
        if logEventTypePrefix != "" {
-               arvLogger = logger.NewLogger(logger.LoggerParams{
-                       Client: arv,
+               arvLogger, err = logger.NewLogger(logger.LoggerParams{
+                       Client:          arv,
                        EventTypePrefix: logEventTypePrefix,
                        WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
        }
@@ -77,56 +102,119 @@ func singlerun() {
        }
 
        var (
+               dataFetcher     summary.DataFetcher
                readCollections collection.ReadCollections
-               keepServerInfo keep.ReadServers
+               keepServerInfo  keep.ReadServers
        )
 
-       if !summary.MaybeReadData(arvLogger, &readCollections, &keepServerInfo) {
-               collectionChannel := make(chan collection.ReadCollections)
+       if summary.ShouldReadData() {
+               dataFetcher = summary.ReadData
+       } else {
+               dataFetcher = BuildDataFetcher(arv)
+       }
 
-               go func() {
-                       collectionChannel <- collection.GetCollectionsAndSummarize(
-                               collection.GetCollectionsParams{
-                                       Client: arv,
-                                       Logger: arvLogger,
-                                       BatchSize: 50})
-               }()
+       err = dataFetcher(arvLogger, &readCollections, &keepServerInfo)
+       if err != nil {
+               return err
+       }
 
-               keepServerInfo = keep.GetKeepServersAndSummarize(
-                       keep.GetKeepServersParams{
-                               Client: arv,
-                               Logger: arvLogger,
-                               Limit: 1000})
+       err = summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+       if err != nil {
+               return err
+       }
+
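+       // Bucket each block by its desired vs. actual replication level and
+       // summarize the counts per bucket.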
+       buckets := summary.BucketReplication(readCollections, keepServerInfo)
+       bucketCounts := buckets.Counts()
 
-               readCollections = <-collectionChannel
+       replicationSummary := buckets.SummarizeBuckets(readCollections)
+       replicationCounts := replicationSummary.ComputeCounts()
+
+       log.Printf("Blocks In Collections: %d, "+
+               "\nBlocks In Keep: %d.",
+               len(readCollections.BlockToDesiredReplication),
+               len(keepServerInfo.BlockToServers))
+       log.Println(replicationCounts.PrettyPrint())
+
+       log.Printf("Blocks Histogram:")
+       for _, rlbss := range bucketCounts {
+               log.Printf("%+v: %10d",
+                       rlbss.Levels,
+                       rlbss.Count)
        }
 
-       summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
-
-       replicationSummary :=
-               summary.SummarizeReplication(arvLogger, readCollections, keepServerInfo)
-
-       log.Printf("Replication Counts:" +
-               "\nBlocks In Collections: %d, " +
-               "\nBlocks In Keep: %d, " +
-               "\nMissing From Keep: %d, " +
-               "\nUnder Replicated: %d, " +
-               "\nOver Replicated: %d, " +
-               "\nReplicated Just Right: %d, " +
-               "\nNot In Any Collection: %d.",
-               len(readCollections.BlockToReplication),
-               len(keepServerInfo.BlockToServers),
-               len(replicationSummary.CollectionBlocksNotInKeep),
-               len(replicationSummary.UnderReplicatedBlocks),
-               len(replicationSummary.OverReplicatedBlocks),
-               len(replicationSummary.CorrectlyReplicatedBlocks),
-               len(replicationSummary.KeepBlocksNotInCollections))
+       kc, err := keepclient.MakeKeepClient(arv)
+       if err != nil {
+               return fmt.Errorf("Error setting up keep client: %v", err)
+       }
 
        // Log that we're finished. We force the recording, since go will
-       // not wait for the timer before exiting.
+       // not wait for the write timer before exiting.
        if arvLogger != nil {
-               arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+               defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+                       summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+                       summaryInfo["block_replication_counts"] = bucketCounts
+                       summaryInfo["replication_summary"] = replicationCounts
+                       p["summary_info"] = summaryInfo
+
                        p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
                })
        }
+
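+       // Build pull lists for under-replicated blocks and trash lists for
+       // blocks that are not referenced by any collection.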
+       pullServers := summary.ComputePullServers(kc,
+               &keepServerInfo,
+               readCollections.BlockToDesiredReplication,
+               replicationSummary.UnderReplicatedBlocks)
+
+       pullLists := summary.BuildPullLists(pullServers)
+
+       trashLists, trashErr := summary.BuildTrashLists(kc,
+               &keepServerInfo,
+               replicationSummary.KeepBlocksNotInCollections)
+
+       err = summary.WritePullLists(arvLogger, pullLists, dryRun)
+       if err != nil {
+               return err
+       }
+
+       if trashErr != nil {
+               return trashErr
+       }
+       keep.SendTrashLists(arvLogger, kc, trashLists, dryRun)
+
+       return nil
+}
+
+// BuildDataFetcher returns a data fetcher that fetches data from remote servers.
+func BuildDataFetcher(arv *arvadosclient.ArvadosClient) summary.DataFetcher {
+       return func(
+               arvLogger *logger.Logger,
+               readCollections *collection.ReadCollections,
+               keepServerInfo *keep.ReadServers,
+       ) error {
+               collDone := make(chan struct{})
+               var collErr error
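+               // Fetch collections in the background while keep server data is
+               // fetched on this goroutine; wait for both before returning.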
+               go func() {
+                       *readCollections, collErr = collection.GetCollectionsAndSummarize(
+                               collection.GetCollectionsParams{
+                                       Client:    arv,
+                                       Logger:    arvLogger,
+                                       BatchSize: collectionBatchSize})
+                       collDone <- struct{}{}
+               }()
+
+               var keepErr error
+               *keepServerInfo, keepErr = keep.GetKeepServersAndSummarize(
+                       keep.GetKeepServersParams{
+                               Client: arv,
+                               Logger: arvLogger,
+                               Limit:  1000})
+
+               <-collDone
+
+               // Return a nil error only if both parts succeeded.
+               if collErr != nil {
+                       return collErr
+               }
+               return keepErr
+       }
 }