X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/b0b276ff6121aace3c52ee855752df6852120343..6613ec1e9c705fb5b950611fd160d4a2babed251:/services/datamanager/datamanager.go

diff --git a/services/datamanager/datamanager.go b/services/datamanager/datamanager.go
index 28d558bb8d..5250d175ff 100644
--- a/services/datamanager/datamanager.go
+++ b/services/datamanager/datamanager.go
@@ -3,6 +3,7 @@
 package main
 
 import (
+	"errors"
 	"flag"
 	"fmt"
 	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
@@ -21,6 +22,8 @@ var (
 	logEventTypePrefix  string
 	logFrequencySeconds int
 	minutesBetweenRuns  int
+	collectionBatchSize int
+	dryRun              bool
 )
 
 func init() {
@@ -35,39 +38,59 @@ func init() {
 	flag.IntVar(&minutesBetweenRuns,
 		"minutes-between-runs",
 		0,
-		"How many minutes we wait betwen data manager runs. 0 means run once and exit.")
+		"How many minutes we wait between data manager runs. 0 means run once and exit.")
+	flag.IntVar(&collectionBatchSize,
+		"collection-batch-size",
+		1000,
+		"How many collections to request in each batch.")
+	flag.BoolVar(&dryRun,
+		"dry-run",
+		false,
+		"Perform a dry run. Log how many blocks would be deleted/moved, but do not issue any changes to keepstore.")
 }
 
 func main() {
 	flag.Parse()
+
 	if minutesBetweenRuns == 0 {
-		singlerun()
+		arv, err := arvadosclient.MakeArvadosClient()
+		if err != nil {
+			loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+		}
+		err = singlerun(arv)
+		if err != nil {
+			loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("singlerun: %v", err))
+		}
 	} else {
 		waitTime := time.Minute * time.Duration(minutesBetweenRuns)
 		for {
 			log.Println("Beginning Run")
-			singlerun()
+			arv, err := arvadosclient.MakeArvadosClient()
+			if err != nil {
+				loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf("Error making arvados client: %v", err))
+			}
+			err = singlerun(arv)
+			if err != nil {
+				log.Printf("singlerun: %v", err)
+			}
 			log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
 			time.Sleep(waitTime)
 		}
 	}
 }
 
-func singlerun() {
-	arv, err := arvadosclient.MakeArvadosClient()
-	if err != nil {
-		log.Fatalf("Error setting up arvados client %s", err.Error())
-	}
+var arvLogger *logger.Logger
 
-	if is_admin, err := util.UserIsAdmin(arv); err != nil {
-		log.Fatalf("Error querying current arvados user %s", err.Error())
-	} else if !is_admin {
-		log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.")
+func singlerun(arv *arvadosclient.ArvadosClient) error {
+	var err error
+	if isAdmin, err := util.UserIsAdmin(arv); err != nil {
+		return errors.New("Error verifying admin token: " + err.Error())
+	} else if !isAdmin {
+		return errors.New("Current user is not an admin. Datamanager requires a privileged token.")
 	}
 
-	var arvLogger *logger.Logger
 	if logEventTypePrefix != "" {
-		arvLogger = logger.NewLogger(logger.LoggerParams{
+		arvLogger, err = logger.NewLogger(logger.LoggerParams{
 			Client:          arv,
 			EventTypePrefix: logEventTypePrefix,
 			WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
@@ -90,9 +113,15 @@ func singlerun() {
 		dataFetcher = BuildDataFetcher(arv)
 	}
 
-	dataFetcher(arvLogger, &readCollections, &keepServerInfo)
+	err = dataFetcher(arvLogger, &readCollections, &keepServerInfo)
+	if err != nil {
+		return err
+	}
 
-	summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+	err = summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+	if err != nil {
+		return err
+	}
 
 	buckets := summary.BucketReplication(readCollections, keepServerInfo)
 	bucketCounts := buckets.Counts()
@@ -113,30 +142,15 @@ func singlerun() {
 			rlbss.Count)
 	}
 
-	kc, err := keepclient.MakeKeepClient(&arv)
+	kc, err := keepclient.MakeKeepClient(arv)
 	if err != nil {
-		loggerutil.FatalWithMessage(arvLogger,
-			fmt.Sprintf("Error setting up keep client %s", err.Error()))
+		return fmt.Errorf("Error setting up keep client %v", err.Error())
 	}
 
-	pullServers := summary.ComputePullServers(kc,
-		&keepServerInfo,
-		readCollections.BlockToDesiredReplication,
-		replicationSummary.UnderReplicatedBlocks)
-
-	pullLists := summary.BuildPullLists(pullServers)
-	trashLists := summary.BuildTrashLists(kc,
-		&keepServerInfo,
-		replicationSummary.KeepBlocksNotInCollections)
-
-	summary.WritePullLists(arvLogger, pullLists)
-
-	keep.SendTrashLists(arvLogger, kc, trashLists)
-
 	// Log that we're finished. We force the recording, since go will
 	// not wait for the write timer before exiting.
 	if arvLogger != nil {
-		arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+		defer arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
 			summaryInfo := logger.GetOrCreateMap(p, "summary_info")
 			summaryInfo["block_replication_counts"] = bucketCounts
 			summaryInfo["replication_summary"] = replicationCounts
@@ -145,29 +159,62 @@ func singlerun() {
 			p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
 		})
 	}
+
+	pullServers := summary.ComputePullServers(kc,
+		&keepServerInfo,
+		readCollections.BlockToDesiredReplication,
+		replicationSummary.UnderReplicatedBlocks)
+
+	pullLists := summary.BuildPullLists(pullServers)
+
+	trashLists, trashErr := summary.BuildTrashLists(kc,
+		&keepServerInfo,
+		replicationSummary.KeepBlocksNotInCollections)
+
+	err = summary.WritePullLists(arvLogger, pullLists, dryRun)
+	if err != nil {
+		return err
+	}
+
+	if trashErr != nil {
+		return trashErr
+	}
+	keep.SendTrashLists(arvLogger, kc, trashLists, dryRun)
+
+	return nil
 }
 
-// Returns a data fetcher that fetches data from remote servers.
-func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
-	return func(arvLogger *logger.Logger,
+// BuildDataFetcher returns a data fetcher that fetches data from remote servers.
+func BuildDataFetcher(arv *arvadosclient.ArvadosClient) summary.DataFetcher {
+	return func(
+		arvLogger *logger.Logger,
 		readCollections *collection.ReadCollections,
-		keepServerInfo *keep.ReadServers) {
-		collectionChannel := make(chan collection.ReadCollections)
-
+		keepServerInfo *keep.ReadServers,
+	) error {
+		collDone := make(chan struct{})
+		var collErr error
 		go func() {
-			collectionChannel <- collection.GetCollectionsAndSummarize(
+			*readCollections, collErr = collection.GetCollectionsAndSummarize(
 				collection.GetCollectionsParams{
 					Client:    arv,
 					Logger:    arvLogger,
-					BatchSize: 50})
+					BatchSize: collectionBatchSize})
+			collDone <- struct{}{}
 		}()
 
-		*keepServerInfo = keep.GetKeepServersAndSummarize(
+		var keepErr error
+		*keepServerInfo, keepErr = keep.GetKeepServersAndSummarize(
 			keep.GetKeepServersParams{
 				Client: arv,
 				Logger: arvLogger,
 				Limit:  1000})
 
-		*readCollections = <-collectionChannel
+		<-collDone
+
+		// Return a nil error only if both parts succeeded.
+		if collErr != nil {
+			return collErr
+		}
+		return keepErr
 	}
 }