X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/9c8bad50218730ac2c640f8e1c00fd12f2f174e4..27daf08f38eec505c224e7776678b32d50241e13:/services/datamanager/datamanager.go

diff --git a/services/datamanager/datamanager.go b/services/datamanager/datamanager.go
index a8e506eacb..70a9ae7859 100644
--- a/services/datamanager/datamanager.go
+++ b/services/datamanager/datamanager.go
@@ -4,12 +4,15 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+	"git.curoverse.com/arvados.git/sdk/go/keepclient"
 	"git.curoverse.com/arvados.git/sdk/go/logger"
 	"git.curoverse.com/arvados.git/sdk/go/util"
 	"git.curoverse.com/arvados.git/services/datamanager/collection"
 	"git.curoverse.com/arvados.git/services/datamanager/keep"
 	"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+	"git.curoverse.com/arvados.git/services/datamanager/summary"
 	"log"
 	"time"
 )
@@ -38,19 +41,25 @@ func init() {
 func main() {
 	flag.Parse()
 	if minutesBetweenRuns == 0 {
-		singlerun()
+		err := singlerun()
+		if err != nil {
+			log.Fatalf("Got an error: %v", err)
+		}
 	} else {
 		waitTime := time.Minute * time.Duration(minutesBetweenRuns)
 		for {
 			log.Println("Beginning Run")
-			singlerun()
+			err := singlerun()
+			if err != nil {
+				log.Printf("Got an error: %v", err)
+			}
 			log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
 			time.Sleep(waitTime)
 		}
 	}
 }
 
-func singlerun() {
+func singlerun() error {
 	arv, err := arvadosclient.MakeArvadosClient()
 	if err != nil {
 		log.Fatalf("Error setting up arvados client %s", err.Error())
@@ -64,7 +73,8 @@ func singlerun() {
 
 	var arvLogger *logger.Logger
 	if logEventTypePrefix != "" {
-		arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
+		arvLogger = logger.NewLogger(logger.LoggerParams{
+			Client: arv,
 			EventTypePrefix: logEventTypePrefix,
 			WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
 	}
@@ -74,28 +84,103 @@ func singlerun() {
 		arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
 	}
 
-	collectionChannel := make(chan collection.ReadCollections)
+	var (
+		dataFetcher     summary.DataFetcher
+		readCollections collection.ReadCollections
+		keepServerInfo  keep.ReadServers
+	)
+
+	if summary.ShouldReadData() {
+		dataFetcher = summary.ReadData
+	} else {
+		dataFetcher = BuildDataFetcher(arv)
+	}
 
-	go func() {
-		collectionChannel <- collection.GetCollectionsAndSummarize(
-			collection.GetCollectionsParams{
-				Client: arv, Logger: arvLogger, BatchSize: 50})
-	}()
+	dataFetcher(arvLogger, &readCollections, &keepServerInfo)
 
-	keepServerInfo := keep.GetKeepServersAndSummarize(
-		keep.GetKeepServersParams{Client: arv, Logger: arvLogger, Limit: 1000})
+	summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
 
-	readCollections := <-collectionChannel
+	buckets := summary.BucketReplication(readCollections, keepServerInfo)
+	bucketCounts := buckets.Counts()
 
-	// TODO(misha): Use these together to verify replication.
-	_ = readCollections
-	_ = keepServerInfo
+	replicationSummary := buckets.SummarizeBuckets(readCollections)
+	replicationCounts := replicationSummary.ComputeCounts()
+
+	log.Printf("Blocks In Collections: %d, "+
+		"\nBlocks In Keep: %d.",
+		len(readCollections.BlockToDesiredReplication),
+		len(keepServerInfo.BlockToServers))
+	log.Println(replicationCounts.PrettyPrint())
+
+	log.Printf("Blocks Histogram:")
+	for _, rlbss := range bucketCounts {
+		log.Printf("%+v: %10d",
+			rlbss.Levels,
+			rlbss.Count)
+	}
+
+	kc, err := keepclient.MakeKeepClient(&arv)
+	if err != nil {
+		loggerutil.FatalWithMessage(arvLogger,
+			fmt.Sprintf("Error setting up keep client %s", err.Error()))
+	}
 
 	// Log that we're finished. We force the recording, since go will
-	// not wait for the timer before exiting.
+	// not wait for the write timer before exiting.
 	if arvLogger != nil {
-		arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+		defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+			summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+			summaryInfo["block_replication_counts"] = bucketCounts
+			summaryInfo["replication_summary"] = replicationCounts
+			p["summary_info"] = summaryInfo
+
 			p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
 		})
 	}
+
+	pullServers := summary.ComputePullServers(kc,
+		&keepServerInfo,
+		readCollections.BlockToDesiredReplication,
+		replicationSummary.UnderReplicatedBlocks)
+
+	pullLists := summary.BuildPullLists(pullServers)
+
+	trashLists, trashErr := summary.BuildTrashLists(kc,
+		&keepServerInfo,
+		replicationSummary.KeepBlocksNotInCollections)
+
+	summary.WritePullLists(arvLogger, pullLists)
+
+	if trashErr != nil {
+		return trashErr
+	} else {
+		keep.SendTrashLists(keep.GetDataManagerToken(arvLogger), kc, trashLists)
+	}
+
+	return nil
+}
+
+// BuildDataFetcher returns a data fetcher that fetches data from remote servers.
+func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
+	return func(arvLogger *logger.Logger,
+		readCollections *collection.ReadCollections,
+		keepServerInfo *keep.ReadServers) {
+		collectionChannel := make(chan collection.ReadCollections)
+
+		go func() {
+			collectionChannel <- collection.GetCollectionsAndSummarize(
+				collection.GetCollectionsParams{
+					Client:    arv,
+					Logger:    arvLogger,
+					BatchSize: 50})
+		}()
+
+		*keepServerInfo = keep.GetKeepServersAndSummarize(
+			keep.GetKeepServersParams{
+				Client: arv,
+				Logger: arvLogger,
+				Limit:  1000})
+
+		*readCollections = <-collectionChannel
+	}
+}
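
Editor's note: the central refactor in this diff routes all data acquisition through a single summary.DataFetcher value, so singlerun no longer cares whether its inputs come from the live API (BuildDataFetcher) or from a previously serialized snapshot (summary.ReadData). The actual type declaration lives in the summary package and is not shown in this diff; judging from the closure returned by BuildDataFetcher, it is presumably along these lines (a sketch, not the authoritative source):

// Sketch of the DataFetcher indirection used by singlerun above.
// The real declaration is in services/datamanager/summary; this
// mirrors the signature implied by BuildDataFetcher's return value.
package summary

import (
	"git.curoverse.com/arvados.git/sdk/go/logger"
	"git.curoverse.com/arvados.git/services/datamanager/collection"
	"git.curoverse.com/arvados.git/services/datamanager/keep"
)

// DataFetcher fills in collection and keep-server state from some
// source: the live Arvados API, or a snapshot read from disk.
type DataFetcher func(arvLogger *logger.Logger,
	readCollections *collection.ReadCollections,
	keepServerInfo *keep.ReadServers)

With that shape, swapping in serialized data for offline debugging is a one-line assignment, which is exactly what the summary.ShouldReadData branch in singlerun does.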
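
BuildDataFetcher also preserves the concurrency trick from the code it replaces: collections are summarized in a goroutine while keep-server state is fetched on the calling goroutine, and the channel receive is the join point. A self-contained illustration of that pattern, using hypothetical stand-in functions rather than the datamanager APIs:

package main

import "fmt"

// Stand-ins for the two expensive fetches in BuildDataFetcher.
func fetchCollections() string { return "collections" }
func fetchKeepServers() string { return "keep servers" }

func main() {
	ch := make(chan string)

	// The first fetch runs concurrently...
	go func() { ch <- fetchCollections() }()

	// ...while the second runs on this goroutine, overlapping the two.
	servers := fetchKeepServers()

	// Receiving joins the work: it blocks until the goroutine sends.
	collections := <-ch

	fmt.Println(collections, servers)
}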