Added logger util GetOrCreateMap() and started using it everywhere.
[arvados.git] / services / datamanager / datamanager.go
index 339a54106afdcd7e589029aa5651beeb8ba01944..d3efe621731c0e1a93cb4dc333fdd9537bad65c2 100644 (file)
@@ -3,17 +3,55 @@
 package main
 
 import (
-       //"git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "flag"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/logger"
        "git.curoverse.com/arvados.git/sdk/go/util"
        "git.curoverse.com/arvados.git/services/datamanager/collection"
+       "git.curoverse.com/arvados.git/services/datamanager/keep"
+       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+       "git.curoverse.com/arvados.git/services/datamanager/summary"
        "log"
+       "time"
 )
 
-// Helper type so we don't have to write out 'map[string]interface{}' every time.
-type Dict map[string]interface{}
+var (
+       logEventTypePrefix  string
+       logFrequencySeconds int
+       minutesBetweenRuns  int
+)
+
+func init() {
+       flag.StringVar(&logEventTypePrefix,
+               "log-event-type-prefix",
+               "experimental-data-manager",
+               "Prefix to use in the event_type of our arvados log entries. Set to empty to turn off logging")
+       flag.IntVar(&logFrequencySeconds,
+               "log-frequency-seconds",
+               20,
+               "How frequently we'll write log entries in seconds.")
+       flag.IntVar(&minutesBetweenRuns,
+               "minutes-between-runs",
+               0,
+               "How many minutes we wait between data manager runs. 0 means run once and exit.")
+}
 
 func main() {
+       flag.Parse()
+       if minutesBetweenRuns == 0 {
+               singlerun()
+       } else {
+               waitTime := time.Minute * time.Duration(minutesBetweenRuns)
+               for {
+                       log.Println("Beginning Run")
+                       singlerun()
+                       log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
+                       time.Sleep(waitTime)
+               }
+       }
+}
+
+func singlerun() {
        arv, err := arvadosclient.MakeArvadosClient()
        if err != nil {
                log.Fatalf("Error setting up arvados client %s", err.Error())
@@ -25,37 +63,75 @@ func main() {
                log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.")
        }
 
-       readCollections := collection.GetCollections(
-               collection.GetCollectionsParams{
-                       Client: arv, Limit: 50, LogEveryNthCollectionProcessed: 10})
+       var arvLogger *logger.Logger
+       if logEventTypePrefix != "" {
+               arvLogger = logger.NewLogger(logger.LoggerParams{
+                       Client:          arv,
+                       EventTypePrefix: logEventTypePrefix,
+                       WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
+       }
 
-       //log.Printf("Read Collections: %v", readCollections)
+       loggerutil.LogRunInfo(arvLogger)
+       if arvLogger != nil {
+               arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
+       }
 
-       // TODO(misha): Add a "readonly" flag. If we're in readonly mode,
-       // lots of behaviors can become warnings (and obviously we can't
-       // write anything).
-       // if !readCollections.ReadAllCollections {
-       //      log.Fatalf("Did not read all collections")
-       // }
+       var (
+               readCollections collection.ReadCollections
+               keepServerInfo  keep.ReadServers
+       )
 
-       log.Printf("Read and processed %d collections",
-               len(readCollections.UuidToCollection))
+       if !summary.MaybeReadData(arvLogger, &readCollections, &keepServerInfo) {
+               collectionChannel := make(chan collection.ReadCollections)
 
-       // TODO(misha): Send SDK and Keep requests in parallel
+               go func() {
+                       collectionChannel <- collection.GetCollectionsAndSummarize(
+                               collection.GetCollectionsParams{
+                                       Client:    arv,
+                                       Logger:    arvLogger,
+                                       BatchSize: 50})
+               }()
 
-       keepParams := arvadosclient.Dict{"limit": 1000}
-       var keepDisks map[string]interface{}
-       err = arv.List("keep_disks", keepParams, &keepDisks)
-       if err != nil {
-               log.Fatalf("Error requesting keep disks from API server: %v", err)
+               keepServerInfo = keep.GetKeepServersAndSummarize(
+                       keep.GetKeepServersParams{
+                               Client: arv,
+                               Logger: arvLogger,
+                               Limit:  1000})
+
+               readCollections = <-collectionChannel
        }
-       var retrievedAll bool
-       var numDisksReturned, numDisksAvailable int
-       if retrievedAll, numDisksReturned, numDisksAvailable =
-               util.SdkListResponseContainsAllAvailableItems(keepDisks); !retrievedAll {
-               log.Fatalf("Failed to retrieve all keep disks. Only received %d of %d",
-                       numDisksReturned, numDisksAvailable)
+
+       summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+
+       buckets := summary.BucketReplication(readCollections, keepServerInfo)
+       bucketCounts := buckets.Counts()
+
+       replicationSummary := buckets.SummarizeBuckets(readCollections)
+       replicationCounts := replicationSummary.ComputeCounts()
+
+       log.Printf("Blocks In Collections: %d, "+
+               "\nBlocks In Keep: %d.",
+               len(readCollections.BlockToReplication),
+               len(keepServerInfo.BlockToServers))
+       log.Println(replicationCounts.PrettyPrint())
+
+       log.Printf("Blocks Histogram:")
+       for _, rlbss := range bucketCounts {
+               log.Printf("%+v: %10d",
+                       rlbss.Levels,
+                       rlbss.Count)
        }
 
-       log.Printf("Returned %d keep disks", numDisksReturned)
+       // Log that we're finished. We force the recording, since go will
+       // not wait for the write timer before exiting.
+       if arvLogger != nil {
+               arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+                       summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+                       summaryInfo["block_replication_counts"] = bucketCounts
+                       summaryInfo["replication_summary"] = replicationCounts
+                       p["summary_info"] = summaryInfo
+
+                       p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+               })
+       }
 }