6277: add default_empty_manifest before_validation filter and update the tests accord...
[arvados.git] / services / datamanager / datamanager.go
index 87a71a9a4b8162e2c101d8e3961b0024ad11bb4f..70a9ae785956396bab936e73b1a7f6ed04c63731 100644 (file)
@@ -4,36 +4,62 @@ package main
 
 import (
        "flag"
+       "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
        "git.curoverse.com/arvados.git/sdk/go/logger"
        "git.curoverse.com/arvados.git/sdk/go/util"
        "git.curoverse.com/arvados.git/services/datamanager/collection"
        "git.curoverse.com/arvados.git/services/datamanager/keep"
+       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+       "git.curoverse.com/arvados.git/services/datamanager/summary"
        "log"
-       "os"
-       "runtime"
        "time"
 )
 
var (
-       logEventType string
+       // Prefix used in the event_type of arvados log entries; "" disables logging.
+       logEventTypePrefix  string
        logFrequencySeconds int
+       // 0 means run once and exit; otherwise sleep this many minutes between runs.
+       minutesBetweenRuns  int
)
 
 func init() {
-       flag.StringVar(&logEventType
-               "log-event-type",
-               "experimental-data-manager-report",
-               "event_type to use in our arvados log entries. Set to empty to turn off logging")
-       flag.IntVar(&logFrequencySeconds, 
+       flag.StringVar(&logEventTypePrefix,
+               "log-event-type-prefix",
+               "experimental-data-manager",
+               "Prefix to use in the event_type of our arvados log entries. Set to empty to turn off logging")
+       flag.IntVar(&logFrequencySeconds,
                "log-frequency-seconds",
                20,
                "How frequently we'll write log entries in seconds.")
+       flag.IntVar(&minutesBetweenRuns,
+               "minutes-between-runs",
+               0,
+               "How many minutes we wait betwen data manager runs. 0 means run once and exit.")
 }
 
func main() {
        flag.Parse()
+       if minutesBetweenRuns == 0 {
+               // Default: run exactly once; a failed run is fatal.
+               err := singlerun()
+               if err != nil {
+                       log.Fatalf("Got an error: %v", err)
+               }
+       } else {
+               // Loop forever, sleeping between runs; a failed run is only
+               // logged so the next scheduled run still takes place.
+               waitTime := time.Minute * time.Duration(minutesBetweenRuns)
+               for {
+                       log.Println("Beginning Run")
+                       err := singlerun()
+                       if err != nil {
+                               log.Printf("Got an error: %v", err)
+                       }
+                       log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
+                       time.Sleep(waitTime)
+               }
+       }
+}
 
+// singlerun performs one data manager pass: it reads collection and keep
+// server data, summarizes block replication, and sends pull and trash lists
+// to the keep servers. Returns an error instead of exiting so the caller
+// can keep looping.
+func singlerun() error {
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                log.Fatalf("Error setting up arvados client %s", err.Error())
@@ -46,84 +72,115 @@ func main() {
        }
 
        var arvLogger *logger.Logger
-       if logEventType != "" {
-               arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
-                       EventType: logEventType,
-                       MinimumWriteInterval: time.Second * time.Duration(logFrequencySeconds)})
+       if logEventTypePrefix != "" {
+               arvLogger = logger.NewLogger(logger.LoggerParams{
+                       Client:          arv,
+                       EventTypePrefix: logEventTypePrefix,
+                       WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
        }
 
+       loggerutil.LogRunInfo(arvLogger)
        if arvLogger != nil {
-               properties, _ := arvLogger.Edit()
-               runInfo := make(map[string]interface{})
-               runInfo["start_time"] = time.Now()
-               runInfo["args"] = os.Args
-               hostname, err := os.Hostname()
-               if err != nil {
-                       runInfo["hostname_error"] = err.Error()
-               } else {
-                       runInfo["hostname"] = hostname
-               }
-               runInfo["pid"] = os.Getpid()
-               properties["run_info"] = runInfo
+               arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
+       }
 
-               arvLogger.AddWriteHook(LogMemoryAlloc)
+       var (
+               dataFetcher     summary.DataFetcher
+               readCollections collection.ReadCollections
+               keepServerInfo  keep.ReadServers
+       )
 
-               arvLogger.Record()
+       if summary.ShouldReadData() {
+               dataFetcher = summary.ReadData
+       } else {
+               dataFetcher = BuildDataFetcher(arv)
        }
 
-       // TODO(misha): Read Collections and Keep Contents concurrently as goroutines.
-       // This requires waiting on them to finish before you let main() exit.
+       dataFetcher(arvLogger, &readCollections, &keepServerInfo)
 
-       RunCollections(collection.GetCollectionsParams{
-               Client: arv, Logger: arvLogger, BatchSize: 50})
+       summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
 
-       RunKeep(keep.GetKeepServersParams{Client: arv, Limit: 1000})
-}
+       buckets := summary.BucketReplication(readCollections, keepServerInfo)
+       bucketCounts := buckets.Counts()
+
+       replicationSummary := buckets.SummarizeBuckets(readCollections)
+       replicationCounts := replicationSummary.ComputeCounts()
 
-func RunCollections(params collection.GetCollectionsParams) {
-       readCollections := collection.GetCollections(params)
+       log.Printf("Blocks In Collections: %d, "+
+               "\nBlocks In Keep: %d.",
+               len(readCollections.BlockToDesiredReplication),
+               len(keepServerInfo.BlockToServers))
+       log.Println(replicationCounts.PrettyPrint())
 
-       UserUsage := ComputeSizeOfOwnedCollections(readCollections)
-       log.Printf("Uuid to Size used: %v", UserUsage)
+       log.Printf("Blocks Histogram:")
+       for _, rlbss := range bucketCounts {
+               log.Printf("%+v: %10d",
+                       rlbss.Levels,
+                       rlbss.Count)
+       }
 
-       // TODO(misha): Add a "readonly" flag. If we're in readonly mode,
-       // lots of behaviors can become warnings (and obviously we can't
-       // write anything).
-       // if !readCollections.ReadAllCollections {
-       //      log.Fatalf("Did not read all collections")
-       // }
+       kc, err := keepclient.MakeKeepClient(&arv)
+       if err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Error setting up keep client %s", err.Error()))
+       }
 
-       log.Printf("Read and processed %d collections",
-               len(readCollections.UuidToCollection))
-}
+       // Log that we're finished. We force the recording, since go will
+       // not wait for the write timer before exiting.
+       if arvLogger != nil {
+               defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+                       summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+                       summaryInfo["block_replication_counts"] = bucketCounts
+                       summaryInfo["replication_summary"] = replicationCounts
+                       p["summary_info"] = summaryInfo
+
+                       p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+               })
+       }
 
-func RunKeep(params keep.GetKeepServersParams) {
-       readServers := keep.GetKeepServers(params)
+       pullServers := summary.ComputePullServers(kc,
+               &keepServerInfo,
+               readCollections.BlockToDesiredReplication,
+               replicationSummary.UnderReplicatedBlocks)
 
-       log.Printf("Returned %d keep disks", len(readServers.ServerToContents))
+       pullLists := summary.BuildPullLists(pullServers)
 
-       blockReplicationCounts := make(map[int]int)
-       for _, infos := range readServers.BlockToServers {
-               replication := len(infos)
-               blockReplicationCounts[replication] += 1
-       }
+       trashLists, trashErr := summary.BuildTrashLists(kc,
+               &keepServerInfo,
+               replicationSummary.KeepBlocksNotInCollections)
 
-       log.Printf("Replication level distribution: %v", blockReplicationCounts)
-}
+       summary.WritePullLists(arvLogger, pullLists)
 
-func ComputeSizeOfOwnedCollections(readCollections collection.ReadCollections) (
-       results map[string]int) {
-       results = make(map[string]int)
-       for _, coll := range readCollections.UuidToCollection {
-               results[coll.OwnerUuid] = results[coll.OwnerUuid] + coll.TotalSize
+       if trashErr != nil {
+               // Propagate the trash-list build error. Previously this
+               // returned err (the keep-client error), which is necessarily
+               // nil here, silently swallowing the failure.
+               return trashErr
+       } else {
+               keep.SendTrashLists(keep.GetDataManagerToken(arvLogger), kc, trashLists)
        }
-       return
+
+       return nil
}
 
-func LogMemoryAlloc(properties map[string]interface{}, entry map[string]interface{}) {
-       _ = entry  // keep the compiler from complaining
-       runInfo := properties["run_info"].(map[string]interface{})
-       var memStats runtime.MemStats
-       runtime.ReadMemStats(&memStats)
-       runInfo["alloc_bytes_in_use"] = memStats.Alloc
+// BuildDataFetcher returns a DataFetcher that fetches data from remote servers.
+func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
+       return func(arvLogger *logger.Logger,
+               readCollections *collection.ReadCollections,
+               keepServerInfo *keep.ReadServers) {
+               collectionChannel := make(chan collection.ReadCollections)
+
+               // Read collections in a goroutine so it runs concurrently
+               // with the keep server read below.
+               go func() {
+                       collectionChannel <- collection.GetCollectionsAndSummarize(
+                               collection.GetCollectionsParams{
+                                       Client:    arv,
+                                       Logger:    arvLogger,
+                                       BatchSize: 50})
+               }()
+
+               *keepServerInfo = keep.GetKeepServersAndSummarize(
+                       keep.GetKeepServersParams{
+                               Client: arv,
+                               Logger: arvLogger,
+                               Limit:  1000})
+
+               // Block until the collection read has finished too.
+               *readCollections = <-collectionChannel
+       }
}