"git.curoverse.com/arvados.git/services/datamanager/collection"
"git.curoverse.com/arvados.git/services/datamanager/keep"
"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+ "git.curoverse.com/arvados.git/services/datamanager/summary"
"log"
"time"
)
var (
- logEventTypePrefix string
+ logEventTypePrefix  string
logFrequencySeconds int
+ minutesBetweenRuns  int
)
func init() {
flag.IntVar(&logFrequencySeconds,
"log-frequency-seconds",
20,
"How frequently we'll write log entries in seconds.")
+ flag.IntVar(&minutesBetweenRuns,
+ "minutes-between-runs",
+ 0,
+ "How many minutes we wait betwen data manager runs. 0 means run once and exit.")
}
func main() {
flag.Parse()
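+ // minutes-between-runs == 0 means run once and exit; otherwise loop forever.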
+ if minutesBetweenRuns == 0 {
+ singlerun()
+ } else {
+ waitTime := time.Minute * time.Duration(minutesBetweenRuns)
+ for {
+ log.Println("Beginning Run")
+ singlerun()
+ log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
+ time.Sleep(waitTime)
+ }
+ }
+}
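+
+ // singlerun performs one data manager pass: it gathers collection and keep
+ // server data, buckets blocks by replication level, and logs a summary.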
+func singlerun() {
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
log.Fatalf("Error setting up arvados client %s", err.Error())
var arvLogger *logger.Logger
if logEventTypePrefix != "" {
- arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
- EventTypePrefix: logEventTypePrefix,
- WriteInterval: time.Second * time.Duration(logFrequencySeconds)})
+ arvLogger = logger.NewLogger(logger.LoggerParams{
+ Client: arv,
+ EventTypePrefix: logEventTypePrefix,
+ WriteInterval: time.Second * time.Duration(logFrequencySeconds)})
}
if arvLogger != nil {
loggerutil.LogRunInfo(arvLogger)
arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
}
- collectionChannel := make(chan collection.ReadCollections)
+ var (
+ readCollections collection.ReadCollections
+ keepServerInfo keep.ReadServers
+ )
+
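+ // Fetch fresh data from the API server unless previously saved data was
+ // loaded (MaybeReadData returns true when it reads saved data).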
+ if !summary.MaybeReadData(arvLogger, &readCollections, &keepServerInfo) {
+ collectionChannel := make(chan collection.ReadCollections)
+
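+ // Fetch collection data and keep server data concurrently.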
+ go func() {
+ collectionChannel <- collection.GetCollectionsAndSummarize(
+ collection.GetCollectionsParams{
+ Client: arv,
+ Logger: arvLogger,
+ BatchSize: 50})
+ }()
+
+ keepServerInfo = keep.GetKeepServersAndSummarize(
+ keep.GetKeepServersParams{
+ Client: arv,
+ Logger: arvLogger,
+ Limit: 1000})
+
+ readCollections = <-collectionChannel
+ }
+
+ summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
- go func() {
- collectionChannel <- collection.GetCollectionsAndSummarize(
- collection.GetCollectionsParams{
- Client: arv, Logger: arvLogger, BatchSize: 50})
- }()
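+ // Bucket each block by its replication levels (requested vs. found in keep).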
+ buckets := summary.BucketReplication(readCollections, keepServerInfo)
+ bucketCounts := buckets.Counts()
- keepServerInfo := keep.GetKeepServersAndSummarize(
- keep.GetKeepServersParams{Client: arv, Logger: arvLogger, Limit: 1000})
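+ // Summarize which blocks and collections are under-, over-, or correctly replicated.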
+ replicationSummary := buckets.SummarizeBuckets(readCollections)
+ replicationCounts := replicationSummary.ComputeCounts()
- readCollections := <-collectionChannel
+ log.Printf("Blocks In Collections: %d, "+
+ "\nBlocks In Keep: %d.",
+ len(readCollections.BlockToReplication),
+ len(keepServerInfo.BlockToServers))
+ log.Println(replicationCounts.PrettyPrint())
- // TODO(misha): Use these together to verify replication.
- _ = readCollections
- _ = keepServerInfo
+ log.Printf("Blocks Histogram:")
+ for _, rlbss := range bucketCounts {
+ log.Printf("%+v: %10d",
+ rlbss.Levels,
+ rlbss.Count)
+ }
// Log that we're finished. We force the recording, since go will
- // not wait for the timer before exiting.
+ // not wait for the write timer before exiting.
if arvLogger != nil {
arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
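+ // Record the replication summary in this run's final log entry.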
+ summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+ summaryInfo["block_replication_counts"] = bucketCounts
+ summaryInfo["replication_summary"] = replicationCounts
+ p["summary_info"] = summaryInfo
+
p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
})
}