package main

import (
	"errors"
	"flag"
	"fmt"
	"log"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
	"git.curoverse.com/arvados.git/sdk/go/logger"
	"git.curoverse.com/arvados.git/sdk/go/util"
	"git.curoverse.com/arvados.git/services/datamanager/collection"
	"git.curoverse.com/arvados.git/services/datamanager/keep"
	"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
	"git.curoverse.com/arvados.git/services/datamanager/summary"
)
// Command-line flags, registered in init.
var (
	// Prefix for the event_type of our arvados log entries; empty
	// disables remote logging entirely.
	logEventTypePrefix string
	// Minimum interval, in seconds, between log writes.
	logFrequencySeconds int
	// Minutes to wait between runs; 0 means run once and exit.
	minutesBetweenRuns int
	// Number of collections to request per API page.
	collectionBatchSize int
	// When true, report what would be deleted/moved without
	// sending any pull or trash lists to keepstore.
	dryRun bool
)
func init() {
- flag.StringVar(&logEventType,
- "log-event-type",
- "experimental-data-manager-report",
- "event_type to use in our arvados log entries. Set to empty to turn off logging")
- flag.IntVar(&logFrequencySeconds,
+ flag.StringVar(&logEventTypePrefix,
+ "log-event-type-prefix",
+ "experimental-data-manager",
+ "Prefix to use in the event_type of our arvados log entries. Set to empty to turn off logging")
+ flag.IntVar(&logFrequencySeconds,
"log-frequency-seconds",
20,
"How frequently we'll write log entries in seconds.")
+ flag.IntVar(&minutesBetweenRuns,
+ "minutes-between-runs",
+ 0,
+ "How many minutes we wait between data manager runs. 0 means run once and exit.")
+ flag.IntVar(&collectionBatchSize,
+ "collection-batch-size",
+ 1000,
+ "How many collections to request in each batch.")
+ flag.BoolVar(&dryRun,
+ "dry-run",
+ false,
+ "Perform a dry run. Log how many blocks would be deleted/moved, but do not issue any changes to keepstore.")
}
func main() {
flag.Parse()
- arv, err := arvadosclient.MakeArvadosClient()
- if err != nil {
- log.Fatalf("Error setting up arvados client %s", err.Error())
+ if minutesBetweenRuns == 0 {
+ arv, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+ }
+ err = singlerun(arv)
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("singlerun: %v", err))
+ }
+ } else {
+ waitTime := time.Minute * time.Duration(minutesBetweenRuns)
+ for {
+ log.Println("Beginning Run")
+ arv, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+ }
+ err = singlerun(arv)
+ if err != nil {
+ log.Printf("singlerun: %v", err)
+ }
+ log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
+ time.Sleep(waitTime)
+ }
}
+}
+
+var arvLogger *logger.Logger
- if is_admin, err := util.UserIsAdmin(arv); err != nil {
- log.Fatalf("Error querying current arvados user %s", err.Error())
- } else if !is_admin {
- log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.")
+func singlerun(arv arvadosclient.ArvadosClient) error {
+ var err error
+ if isAdmin, err := util.UserIsAdmin(arv); err != nil {
+ return errors.New("Error verifying admin token: " + err.Error())
+ } else if !isAdmin {
+ return errors.New("Current user is not an admin. Datamanager requires a privileged token.")
}
- var arvLogger *logger.Logger
- if logEventType != "" {
- arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
- EventType: logEventType,
- MinimumWriteInterval: time.Second * time.Duration(logFrequencySeconds)})
+ if logEventTypePrefix != "" {
+ arvLogger, err = logger.NewLogger(logger.LoggerParams{
+ Client: arv,
+ EventTypePrefix: logEventTypePrefix,
+ WriteInterval: time.Second * time.Duration(logFrequencySeconds)})
}
+ loggerutil.LogRunInfo(arvLogger)
if arvLogger != nil {
- properties, _ := arvLogger.Edit()
- runInfo := make(map[string]interface{})
- runInfo["start_time"] = time.Now()
- runInfo["args"] = os.Args
- hostname, err := os.Hostname()
- if err != nil {
- runInfo["hostname_error"] = err.Error()
- } else {
- runInfo["hostname"] = hostname
- }
- runInfo["pid"] = os.Getpid()
- properties["run_info"] = runInfo
+ arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
+ }
- arvLogger.AddWriteHook(LogMemoryAlloc)
+ var (
+ dataFetcher summary.DataFetcher
+ readCollections collection.ReadCollections
+ keepServerInfo keep.ReadServers
+ )
- arvLogger.Record()
+ if summary.ShouldReadData() {
+ dataFetcher = summary.ReadData
+ } else {
+ dataFetcher = BuildDataFetcher(arv)
}
- collectionChannel := make(chan collection.ReadCollections)
+ err = dataFetcher(arvLogger, &readCollections, &keepServerInfo)
+ if err != nil {
+ return err
+ }
- go func() { collectionChannel <- collection.GetCollectionsAndSummarize(
- collection.GetCollectionsParams{
- Client: arv, Logger: arvLogger, BatchSize: 50}) }()
+ err = summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+ if err != nil {
+ return err
+ }
- keepServerInfo := keep.GetKeepServersAndSummarize(
- keep.GetKeepServersParams{Client: arv, Limit: 1000})
+ buckets := summary.BucketReplication(readCollections, keepServerInfo)
+ bucketCounts := buckets.Counts()
- readCollections := <-collectionChannel
+ replicationSummary := buckets.SummarizeBuckets(readCollections)
+ replicationCounts := replicationSummary.ComputeCounts()
- // Make compiler happy.
- _ = readCollections
- _ = keepServerInfo
+ log.Printf("Blocks In Collections: %d, "+
+ "\nBlocks In Keep: %d.",
+ len(readCollections.BlockToDesiredReplication),
+ len(keepServerInfo.BlockToServers))
+ log.Println(replicationCounts.PrettyPrint())
- // Log that we're finished
+ log.Printf("Blocks Histogram:")
+ for _, rlbss := range bucketCounts {
+ log.Printf("%+v: %10d",
+ rlbss.Levels,
+ rlbss.Count)
+ }
+
+ kc, err := keepclient.MakeKeepClient(&arv)
+ if err != nil {
+ return fmt.Errorf("Error setting up keep client %v", err.Error())
+ }
+
+ // Log that we're finished. We force the recording, since go will
+ // not wait for the write timer before exiting.
if arvLogger != nil {
- properties,_ := arvLogger.Edit()
- properties["run_info"].(map[string]interface{})["end_time"] = time.Now()
- // Force the recording, since go will not wait for the timer before exiting.
- arvLogger.ForceRecord()
+ defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+ summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+ summaryInfo["block_replication_counts"] = bucketCounts
+ summaryInfo["replication_summary"] = replicationCounts
+ p["summary_info"] = summaryInfo
+
+ p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+ })
}
+
+ pullServers := summary.ComputePullServers(kc,
+ &keepServerInfo,
+ readCollections.BlockToDesiredReplication,
+ replicationSummary.UnderReplicatedBlocks)
+
+ pullLists := summary.BuildPullLists(pullServers)
+
+ trashLists, trashErr := summary.BuildTrashLists(kc,
+ &keepServerInfo,
+ replicationSummary.KeepBlocksNotInCollections)
+
+ err = summary.WritePullLists(arvLogger, pullLists, dryRun)
+ if err != nil {
+ return err
+ }
+
+ if trashErr != nil {
+ return err
+ }
+ keep.SendTrashLists(arvLogger, kc, trashLists, dryRun)
+
+ return nil
}
-func LogMemoryAlloc(properties map[string]interface{}, entry map[string]interface{}) {
- _ = entry // keep the compiler from complaining
- runInfo := properties["run_info"].(map[string]interface{})
- var memStats runtime.MemStats
- runtime.ReadMemStats(&memStats)
- runInfo["alloc_bytes_in_use"] = memStats.Alloc
+// BuildDataFetcher returns a data fetcher that fetches data from remote servers.
+func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
+ return func(
+ arvLogger *logger.Logger,
+ readCollections *collection.ReadCollections,
+ keepServerInfo *keep.ReadServers,
+ ) error {
+ collDone := make(chan struct{})
+ var collErr error
+ go func() {
+ *readCollections, collErr = collection.GetCollectionsAndSummarize(
+ collection.GetCollectionsParams{
+ Client: arv,
+ Logger: arvLogger,
+ BatchSize: collectionBatchSize})
+ collDone <- struct{}{}
+ }()
+
+ var keepErr error
+ *keepServerInfo, keepErr = keep.GetKeepServersAndSummarize(
+ keep.GetKeepServersParams{
+ Client: arv,
+ Logger: arvLogger,
+ Limit: 1000})
+
+ <-collDone
+
+ // Return a nil error only if both parts succeeded.
+ if collErr != nil {
+ return collErr
+ }
+ return keepErr
+ }
}