X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/e0889a8f6997327fd9b4d826237c8166cf909741..5824ee2e5198dd46a7813fe2adbd380a114f9ac4:/services/datamanager/collection/collection.go

diff --git a/services/datamanager/collection/collection.go b/services/datamanager/collection/collection.go
index fbdec1516a..e0929b7757 100644
--- a/services/datamanager/collection/collection.go
+++ b/services/datamanager/collection/collection.go
@@ -1,4 +1,4 @@
-/* Deals with parsing Collection responses from API Server. */
+// Deals with parsing Collection responses from API Server.
 
 package collection
 
@@ -9,70 +9,69 @@ import (
 	"git.curoverse.com/arvados.git/sdk/go/blockdigest"
 	"git.curoverse.com/arvados.git/sdk/go/logger"
 	"git.curoverse.com/arvados.git/sdk/go/manifest"
+	"git.curoverse.com/arvados.git/sdk/go/util"
 	"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
 	"log"
 	"os"
-	"runtime"
 	"runtime/pprof"
 	"time"
 )
 
 var (
-	heap_profile_filename string
+	heapProfileFilename string
 	// globals for debugging
 	totalManifestSize uint64
-	maxManifestSize uint64
+	maxManifestSize   uint64
+)
+
+const (
+	DefaultReplicationLevel = 2
 )
 
 type Collection struct {
-	Uuid string
-	OwnerUuid string
-	ReplicationLevel int
+	Uuid              string
+	OwnerUuid         string
+	ReplicationLevel  int
 	BlockDigestToSize map[blockdigest.BlockDigest]int
-	TotalSize int
+	TotalSize         int
 }
 
 type ReadCollections struct {
-	ReadAllCollections bool
-	UuidToCollection map[string]Collection
-	OwnerToCollectionSize map[string]int
+	ReadAllCollections       bool
+	UuidToCollection         map[string]Collection
+	OwnerToCollectionSize    map[string]int
+	BlockToReplication       map[blockdigest.BlockDigest]int
+	CollectionUuidToIndex    map[string]int
+	CollectionIndexToUuid    []string
+	BlockToCollectionIndices map[blockdigest.BlockDigest][]int
 }
 
 type GetCollectionsParams struct {
-	Client arvadosclient.ArvadosClient
-	Logger *logger.Logger
+	Client    arvadosclient.ArvadosClient
+	Logger    *logger.Logger
 	BatchSize int
 }
 
 type SdkCollectionInfo struct {
-	Uuid           string     `json:"uuid"`
-	OwnerUuid      string     `json:"owner_uuid"`
-	Redundancy     int        `json:"redundancy"`
-	ModifiedAt     time.Time  `json:"modified_at"`
-	ManifestText   string     `json:"manifest_text"`
+	Uuid         string    `json:"uuid"`
+	OwnerUuid    string    `json:"owner_uuid"`
+	Redundancy   int       `json:"redundancy"`
+	ModifiedAt   time.Time `json:"modified_at"`
+	ManifestText string    `json:"manifest_text"`
 }
 
 type SdkCollectionList struct {
-	ItemsAvailable   int                   `json:"items_available"`
-	Items            []SdkCollectionInfo   `json:"items"`
+	ItemsAvailable int                 `json:"items_available"`
+	Items          []SdkCollectionInfo `json:"items"`
 }
 
 func init() {
-	flag.StringVar(&heap_profile_filename,
+	flag.StringVar(&heapProfileFilename,
 		"heap-profile",
 		"",
 		"File to write the heap profiles to. Leave blank to skip profiling.")
 }
 
-// // Methods to implement util.SdkListResponse Interface
-// func (s SdkCollectionList) NumItemsAvailable() (numAvailable int, err error) {
-// 	return s.ItemsAvailable, nil
-// }
-
-// func (s SdkCollectionList) NumItemsContained() (numContained int, err error) {
-// 	return len(s.Items), nil
-// }
-
 // Write the heap profile to a file for later review.
 // Since a file is expected to only contain a single heap profile this
 // function overwrites the previously written profile, so it is safe
@@ -80,9 +79,9 @@ func init() {
 // Otherwise we would see cumulative numbers as explained here:
 // https://groups.google.com/d/msg/golang-nuts/ZyHciRglQYc/2nh4Ndu2fZcJ
 func WriteHeapProfile() {
-	if heap_profile_filename != "" {
+	if heapProfileFilename != "" {
 
-		heap_profile, err := os.Create(heap_profile_filename)
+		heap_profile, err := os.Create(heapProfileFilename)
 		if err != nil {
 			log.Fatal(err)
 		}
@@ -96,17 +95,9 @@ func WriteHeapProfile() {
 	}
 }
 
-
 func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
 	results = GetCollections(params)
-	ComputeSizeOfOwnedCollections(&results)
-
-	if params.Logger != nil {
-		properties,_ := params.Logger.Edit()
-		collectionInfo := properties["collection_info"].(map[string]interface{})
-		collectionInfo["owner_to_collection_size"] = results.OwnerToCollectionSize
-		params.Logger.Record()
-	}
+	results.Summarize(params.Logger)
 
 	log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
 	log.Printf("Read and processed %d collections",
@@ -131,20 +122,24 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 	fieldsWanted := []string{"manifest_text",
 		"owner_uuid",
 		"uuid",
-		// TODO(misha): Start using the redundancy field.
 		"redundancy",
 		"modified_at"}
 
 	sdkParams := arvadosclient.Dict{
-		"select": fieldsWanted,
-		"order": []string{"modified_at ASC"},
+		"select":  fieldsWanted,
+		"order":   []string{"modified_at ASC"},
 		"filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}}
 
 	if params.BatchSize > 0 {
 		sdkParams["limit"] = params.BatchSize
 	}
 
-	initialNumberOfCollectionsAvailable := NumberCollectionsAvailable(params.Client)
+	initialNumberOfCollectionsAvailable, err :=
+		util.NumberItemsAvailable(params.Client, "collections")
+	if err != nil {
+		loggerutil.FatalWithMessage(params.Logger,
+			fmt.Sprintf("Error querying collection count: %v", err))
+	}
 	// Include a 1% margin for collections added while we're reading so
 	// that we don't have to grow the map in most cases.
@@ -152,12 +147,11 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 	results.UuidToCollection = make(map[string]Collection, maxExpectedCollections)
 
 	if params.Logger != nil {
-		properties,_ := params.Logger.Edit()
-		collectionInfo := make(map[string]interface{})
-		collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
-		collectionInfo["batch_size"] = params.BatchSize
-		properties["collection_info"] = collectionInfo
-		params.Logger.Record()
+		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+			collectionInfo := logger.GetOrCreateMap(p, "collection_info")
+			collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
+			collectionInfo["batch_size"] = params.BatchSize
+		})
 	}
 
 	// These values are just for getting the loop to run the first time,
@@ -181,42 +175,38 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 		// Process collection and update our date filter.
sdkParams["filters"].([][]string)[0][2] = ProcessCollections(params.Logger, - collections.Items, - results.UuidToCollection).Format(time.RFC3339) + collections.Items, + results.UuidToCollection).Format(time.RFC3339) // update counts previousTotalCollections = totalCollections totalCollections = len(results.UuidToCollection) - log.Printf("%d collections read, %d new in last batch, " + + log.Printf("%d collections read, %d new in last batch, "+ "%s latest modified date, %.0f %d %d avg,max,total manifest size", totalCollections, - totalCollections - previousTotalCollections, + totalCollections-previousTotalCollections, sdkParams["filters"].([][]string)[0][2], float32(totalManifestSize)/float32(totalCollections), maxManifestSize, totalManifestSize) if params.Logger != nil { - properties,_ := params.Logger.Edit() - collectionInfo := properties["collection_info"].(map[string]interface{}) - collectionInfo["collections_read"] = totalCollections - collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2] - collectionInfo["total_manifest_size"] = totalManifestSize - collectionInfo["max_manifest_size"] = maxManifestSize - params.Logger.Record() + params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) { + collectionInfo := logger.GetOrCreateMap(p, "collection_info") + collectionInfo["collections_read"] = totalCollections + collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2] + collectionInfo["total_manifest_size"] = totalManifestSize + collectionInfo["max_manifest_size"] = maxManifestSize + }) } } - // Just in case this lowers the numbers reported in the heap profile. - runtime.GC() - // Write the heap profile for examining memory usage WriteHeapProfile() return } - // StrCopy returns a newly allocated string. // It is useful to copy slices so that the garbage collector can reuse // the memory of the longer strings they came from. @@ -224,29 +214,33 @@ func StrCopy(s string) string { return string([]byte(s)) } - func ProcessCollections(arvLogger *logger.Logger, receivedCollections []SdkCollectionInfo, uuidToCollection map[string]Collection) (latestModificationDate time.Time) { for _, sdkCollection := range receivedCollections { collection := Collection{Uuid: StrCopy(sdkCollection.Uuid), - OwnerUuid: StrCopy(sdkCollection.OwnerUuid), - ReplicationLevel: sdkCollection.Redundancy, + OwnerUuid: StrCopy(sdkCollection.OwnerUuid), + ReplicationLevel: sdkCollection.Redundancy, BlockDigestToSize: make(map[blockdigest.BlockDigest]int)} if sdkCollection.ModifiedAt.IsZero() { loggerutil.FatalWithMessage(arvLogger, fmt.Sprintf( - "Arvados SDK collection returned with unexpected zero " + - "modifcation date. This probably means that either we failed to " + - "parse the modification date or the API server has changed how " + - "it returns modification dates: %v", + "Arvados SDK collection returned with unexpected zero "+ + "modification date. 
This probably means that either we failed to "+ + "parse the modification date or the API server has changed how "+ + "it returns modification dates: %+v", collection)) } if sdkCollection.ModifiedAt.After(latestModificationDate) { latestModificationDate = sdkCollection.ModifiedAt } + + if collection.ReplicationLevel == 0 { + collection.ReplicationLevel = DefaultReplicationLevel + } + manifest := manifest.Manifest{sdkCollection.ManifestText} manifestSize := uint64(len(sdkCollection.ManifestText)) @@ -256,11 +250,10 @@ func ProcessCollections(arvLogger *logger.Logger, if manifestSize > maxManifestSize { maxManifestSize = manifestSize } - + blockChannel := manifest.BlockIterWithDuplicates() for block := range blockChannel { - if stored_size, stored := collection.BlockDigestToSize[block.Digest]; - stored && stored_size != block.Size { + if stored_size, stored := collection.BlockDigestToSize[block.Digest]; stored && stored_size != block.Size { message := fmt.Sprintf( "Collection %s contains multiple sizes (%d and %d) for block %s", collection.Uuid, @@ -286,24 +279,44 @@ func ProcessCollections(arvLogger *logger.Logger, return } - -func NumberCollectionsAvailable(client arvadosclient.ArvadosClient) (int) { - var collections SdkCollectionList - sdkParams := arvadosclient.Dict{"limit": 0} - err := client.List("collections", sdkParams, &collections) - if err != nil { - log.Fatalf("error querying collections for items available: %v", err) - } - - return collections.ItemsAvailable -} - - -func ComputeSizeOfOwnedCollections(readCollections *ReadCollections) { +func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) { readCollections.OwnerToCollectionSize = make(map[string]int) + readCollections.BlockToReplication = make(map[blockdigest.BlockDigest]int) + numCollections := len(readCollections.UuidToCollection) + readCollections.CollectionUuidToIndex = make(map[string]int, numCollections) + readCollections.CollectionIndexToUuid = make([]string, 0, numCollections) + readCollections.BlockToCollectionIndices = make(map[blockdigest.BlockDigest][]int) + for _, coll := range readCollections.UuidToCollection { + collectionIndex := len(readCollections.CollectionIndexToUuid) + readCollections.CollectionIndexToUuid = + append(readCollections.CollectionIndexToUuid, coll.Uuid) + readCollections.CollectionUuidToIndex[coll.Uuid] = collectionIndex + readCollections.OwnerToCollectionSize[coll.OwnerUuid] = readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize + + for block, _ := range coll.BlockDigestToSize { + readCollections.BlockToCollectionIndices[block] = + append(readCollections.BlockToCollectionIndices[block], collectionIndex) + storedReplication := readCollections.BlockToReplication[block] + if coll.ReplicationLevel > storedReplication { + readCollections.BlockToReplication[block] = coll.ReplicationLevel + } + } + } + + if arvLogger != nil { + arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) { + collectionInfo := logger.GetOrCreateMap(p, "collection_info") + // Since maps are shallow copied, we run a risk of concurrent + // updates here. By copying results.OwnerToCollectionSize into + // the log, we're assuming that it won't be updated. + collectionInfo["owner_to_collection_size"] = + readCollections.OwnerToCollectionSize + collectionInfo["distinct_blocks_named"] = + len(readCollections.BlockToReplication) + }) } return
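
The core aggregation introduced by the new Summarize method is: assign each collection a dense integer index, record which collection indices reference each block, and keep, per block, the highest replication level requested by any collection that contains it. The standalone sketch below illustrates that logic under simplified assumptions (plain string digests instead of blockdigest.BlockDigest, a slice of collections instead of UuidToCollection, and no logger); it is an illustrative reconstruction, not the package's real code.

// summarize_sketch.go — illustrative only; types are simplified stand-ins.
package main

import "fmt"

type collection struct {
	UUID              string
	ReplicationLevel  int
	BlockDigestToSize map[string]int // hypothetical stand-in for map[blockdigest.BlockDigest]int
}

func main() {
	collections := []collection{
		{UUID: "uuid-a", ReplicationLevel: 2, BlockDigestToSize: map[string]int{"block1": 10, "block2": 20}},
		{UUID: "uuid-b", ReplicationLevel: 3, BlockDigestToSize: map[string]int{"block2": 20}},
	}

	// Dense index assignment, mirroring CollectionIndexToUuid / CollectionUuidToIndex.
	indexToUUID := make([]string, 0, len(collections))
	uuidToIndex := make(map[string]int, len(collections))

	// Per-block bookkeeping, mirroring BlockToCollectionIndices / BlockToReplication.
	blockToIndices := make(map[string][]int)
	blockToReplication := make(map[string]int)

	for _, c := range collections {
		idx := len(indexToUUID)
		indexToUUID = append(indexToUUID, c.UUID)
		uuidToIndex[c.UUID] = idx

		for block := range c.BlockDigestToSize {
			// Remember every collection index that names this block.
			blockToIndices[block] = append(blockToIndices[block], idx)
			// Keep the maximum replication level requested for this block.
			if c.ReplicationLevel > blockToReplication[block] {
				blockToReplication[block] = c.ReplicationLevel
			}
		}
	}

	// block2 is held by both collections, so its required replication is the
	// larger of the two levels and it maps to both collection indices.
	fmt.Println(blockToReplication["block2"]) // 3
	fmt.Println(blockToIndices["block2"])     // [0 1]
}

Storing small integer indices in the per-block lists, rather than repeating full collection UUID strings, keeps BlockToCollectionIndices compact when many collections share the same blocks.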