Merge branch '8484-sanity-check-collection-count' closes #8484
[arvados.git] / services / datamanager / collection / collection.go
index b15864e300aa4422169a802359f91194bfb76828..9b7eb7543a4ebed086aba2d409f44fcc789ef222 100644 (file)
@@ -1,4 +1,4 @@
-/* Deals with parsing Collection responses from API Server. */
+// Deals with parsing Collection responses from API Server.
 
 package collection
 
@@ -10,107 +10,99 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/logger"
        "git.curoverse.com/arvados.git/sdk/go/manifest"
        "git.curoverse.com/arvados.git/sdk/go/util"
-       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
        "log"
        "os"
-       "runtime"
        "runtime/pprof"
        "time"
 )
 
 var (
-       heapProfileFilename string
-       // globals for debugging
-       totalManifestSize uint64
-       maxManifestSize   uint64
-)
-
-const (
-       DefaultReplicationLevel = 2
+       // HeapProfileFilename is the file to write heap profiles to.
+       // Leave blank to skip profiling.
+       HeapProfileFilename string
 )
 
+// Collection describes a single collection: its owner, replication level,
+// and the size of each block named in its manifest.
 type Collection struct {
-       Uuid              string
-       OwnerUuid         string
+       UUID              string
+       OwnerUUID         string
        ReplicationLevel  int
        BlockDigestToSize map[blockdigest.BlockDigest]int
        TotalSize         int
 }
 
+// ReadCollections holds information about all collections read from the API server.
 type ReadCollections struct {
-       ReadAllCollections    bool
-       UuidToCollection      map[string]Collection
-       OwnerToCollectionSize map[string]int
-       BlockToReplication    map[blockdigest.BlockDigest]int
-       // TODO(misha): add block to collection map
+       ReadAllCollections        bool
+       UUIDToCollection          map[string]Collection
+       OwnerToCollectionSize     map[string]int
+       BlockToDesiredReplication map[blockdigest.DigestWithSize]int
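+       // Each collection is also assigned a compact integer index so that
+       // per-block data can refer to collections by int rather than by UUID.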
+       CollectionUUIDToIndex     map[string]int
+       CollectionIndexToUUID     []string
+       BlockToCollectionIndices  map[blockdigest.DigestWithSize][]int
 }
 
+// GetCollectionsParams holds the arguments to GetCollections.
 type GetCollectionsParams struct {
        Client    arvadosclient.ArvadosClient
        Logger    *logger.Logger
        BatchSize int
 }
 
+// SdkCollectionInfo holds the fields we request for each collection from the API server.
 type SdkCollectionInfo struct {
-       Uuid         string    `json:"uuid"`
-       OwnerUuid    string    `json:"owner_uuid"`
+       UUID         string    `json:"uuid"`
+       OwnerUUID    string    `json:"owner_uuid"`
        Redundancy   int       `json:"redundancy"`
        ModifiedAt   time.Time `json:"modified_at"`
        ManifestText string    `json:"manifest_text"`
 }
 
+// SdkCollectionList is one page of collections returned by the API server.
 type SdkCollectionList struct {
        ItemsAvailable int                 `json:"items_available"`
        Items          []SdkCollectionInfo `json:"items"`
 }
 
 func init() {
-       flag.StringVar(&heapProfileFilename,
+       flag.StringVar(&HeapProfileFilename,
                "heap-profile",
                "",
                "File to write the heap profiles to. Leave blank to skip profiling.")
 }
 
-// Write the heap profile to a file for later review.
+// WriteHeapProfile writes the heap profile to a file for later review.
 // Since a file is expected to only contain a single heap profile this
 // function overwrites the previously written profile, so it is safe
 // to call multiple times in a single run.
 // Otherwise we would see cumulative numbers as explained here:
 // https://groups.google.com/d/msg/golang-nuts/ZyHciRglQYc/2nh4Ndu2fZcJ
-func WriteHeapProfile() {
-       if heapProfileFilename != "" {
-
-               heap_profile, err := os.Create(heapProfileFilename)
+func WriteHeapProfile() error {
+       if HeapProfileFilename != "" {
+               heapProfile, err := os.Create(HeapProfileFilename)
                if err != nil {
-                       log.Fatal(err)
+                       return err
                }
 
-               defer heap_profile.Close()
+               defer heapProfile.Close()
 
-               err = pprof.WriteHeapProfile(heap_profile)
-               if err != nil {
-                       log.Fatal(err)
-               }
+               err = pprof.WriteHeapProfile(heapProfile)
+               return err
        }
-}
 
-func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
-       results = GetCollections(params)
-       Summarize(&results)
+       return nil
+}
 
-       if params.Logger != nil {
-               params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
-                       collectionInfo := p["collection_info"].(map[string]interface{})
-                       // Since maps are shallow copied, we run a risk of concurrent
-                       // updates here. By copying results.OwnerToCollectionSize into
-                       // the log, we're assuming that it won't be updated.
-                       collectionInfo["owner_to_collection_size"] = results.OwnerToCollectionSize
-               })
+// GetCollectionsAndSummarize reads all collections from the API server and
+// summarizes the result.
+func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections, err error) {
+       results, err = GetCollections(params)
+       if err != nil {
+               return
        }
 
+       results.Summarize(params.Logger)
+
        log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
        log.Printf("Read and processed %d collections",
-               len(results.UuidToCollection))
+               len(results.UUIDToCollection))
 
        // TODO(misha): Add a "readonly" flag. If we're in readonly mode,
        // lots of behaviors can become warnings (and obviously we can't
@@ -122,88 +114,131 @@ func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollec
        return
 }
 
-func GetCollections(params GetCollectionsParams) (results ReadCollections) {
+// GetCollections reads all collections from the API server, paging by
+// modification date and offset until none remain.
+func GetCollections(params GetCollectionsParams) (results ReadCollections, err error) {
        if &params.Client == nil {
-               log.Fatalf("params.Client passed to GetCollections() should " +
+               err = fmt.Errorf("params.Client passed to GetCollections() should " +
                        "contain a valid ArvadosClient, but instead it is nil.")
+               return
        }
 
        fieldsWanted := []string{"manifest_text",
                "owner_uuid",
                "uuid",
-               // TODO(misha): Start using the redundancy field.
                "redundancy",
                "modified_at"}
 
        sdkParams := arvadosclient.Dict{
                "select":  fieldsWanted,
-               "order":   []string{"modified_at ASC"},
-               "filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}}
+               "order":   []string{"modified_at ASC", "uuid ASC"},
+               "filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
+               "offset": 0}
 
        if params.BatchSize > 0 {
                sdkParams["limit"] = params.BatchSize
        }
 
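+       // Ask the API server for its default collection replication level; it is
+       // used below for collections that do not report their own redundancy.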
+       var defaultReplicationLevel int
+       {
+               var value interface{}
+               value, err = params.Client.Discovery("defaultCollectionReplication")
+               if err != nil {
+                       return
+               }
+
+               defaultReplicationLevel = int(value.(float64))
+               if defaultReplicationLevel <= 0 {
+                       err = fmt.Errorf("Default collection replication returned by arvados SDK "+
+                               "should be a positive integer but instead it was %d.",
+                               defaultReplicationLevel)
+                       return
+               }
+       }
+
        initialNumberOfCollectionsAvailable, err :=
                util.NumberItemsAvailable(params.Client, "collections")
        if err != nil {
-               loggerutil.FatalWithMessage(params.Logger,
-                       fmt.Sprintf("Error querying collection count: %v", err))
+               return
        }
        // Include a 1% margin for collections added while we're reading so
        // that we don't have to grow the map in most cases.
        maxExpectedCollections := int(
                float64(initialNumberOfCollectionsAvailable) * 1.01)
-       results.UuidToCollection = make(map[string]Collection, maxExpectedCollections)
+       results.UUIDToCollection = make(map[string]Collection, maxExpectedCollections)
 
        if params.Logger != nil {
                params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
-                       collectionInfo := make(map[string]interface{})
+                       collectionInfo := logger.GetOrCreateMap(p, "collection_info")
                        collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
                        collectionInfo["batch_size"] = params.BatchSize
-                       p["collection_info"] = collectionInfo
+                       collectionInfo["default_replication_level"] = defaultReplicationLevel
                })
        }
 
        // These values are just for getting the loop to run the first time,
        // afterwards they'll be set to real values.
-       previousTotalCollections := -1
-       totalCollections := 0
-       for totalCollections > previousTotalCollections {
+       remainingCollections := 1
+       var totalCollections int
+       var previousTotalCollections int
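+       // Page through the collections, ordered by modification date (with UUID as
+       // a tiebreaker), until the API server reports that none remain.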
+       for remainingCollections > 0 {
                // We're still finding new collections
 
                // Write the heap profile for examining memory usage
-               WriteHeapProfile()
+               err = WriteHeapProfile()
+               if err != nil {
+                       return
+               }
 
                // Get next batch of collections.
                var collections SdkCollectionList
-               err := params.Client.List("collections", sdkParams, &collections)
+               err = params.Client.List("collections", sdkParams, &collections)
                if err != nil {
-                       loggerutil.FatalWithMessage(params.Logger,
-                               fmt.Sprintf("Error querying collections: %v", err))
+                       return
                }
+               batchCollections := len(collections.Items)
+
+               // We must always have at least one collection in the batch
+               if batchCollections < 1 {
+                       err = fmt.Errorf("API query returned no collections for %+v", sdkParams)
+                       return
+               }
+
+               // Update count of remaining collections
+               remainingCollections = collections.ItemsAvailable - sdkParams["offset"].(int) - batchCollections
 
                // Process collection and update our date filter.
-               sdkParams["filters"].([][]string)[0][2] =
-                       ProcessCollections(params.Logger,
-                               collections.Items,
-                               results.UuidToCollection).Format(time.RFC3339)
+               latestModificationDate, maxManifestSize, totalManifestSize, err := ProcessCollections(params.Logger,
+                       collections.Items,
+                       defaultReplicationLevel,
+                       results.UUIDToCollection)
+               if err != nil {
+                       return results, err
+               }
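+               // Advance the date filter to the newest modification date seen. If it
+               // is unchanged (every collection in the batch shares the filter's
+               // timestamp), page forward with the offset instead so we do not
+               // refetch the same collections.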
+               if sdkParams["filters"].([][]string)[0][2] != latestModificationDate.Format(time.RFC3339) {
+                       sdkParams["filters"].([][]string)[0][2] = latestModificationDate.Format(time.RFC3339)
+                       sdkParams["offset"] = 0
+               } else {
+                       sdkParams["offset"] = sdkParams["offset"].(int) + batchCollections
+               }
 
                // update counts
                previousTotalCollections = totalCollections
-               totalCollections = len(results.UuidToCollection)
+               totalCollections = len(results.UUIDToCollection)
 
-               log.Printf("%d collections read, %d new in last batch, "+
+               log.Printf("%d collections read, %d (%d new) in last batch, "+
+                       "%d remaining, "+
                        "%s latest modified date, %.0f %d %d avg,max,total manifest size",
                        totalCollections,
+                       batchCollections,
                        totalCollections-previousTotalCollections,
+                       remainingCollections,
                        sdkParams["filters"].([][]string)[0][2],
                        float32(totalManifestSize)/float32(totalCollections),
                        maxManifestSize, totalManifestSize)
 
                if params.Logger != nil {
                        params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
-                               collectionInfo := p["collection_info"].(map[string]interface{})
+                               collectionInfo := logger.GetOrCreateMap(p, "collection_info")
                                collectionInfo["collections_read"] = totalCollections
                                collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
                                collectionInfo["total_manifest_size"] = totalManifestSize
@@ -212,11 +247,32 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
                }
        }
 
-       // Just in case this lowers the numbers reported in the heap profile.
-       runtime.GC()
+       // Make one final API request to verify that we have processed all
+       // collections available up to the latest modification date.
+       var collections SdkCollectionList
+       sdkParams["filters"].([][]string)[0][1] = "<="
+       sdkParams["limit"] = 0
+       err = params.Client.List("collections", sdkParams, &collections)
+       if err != nil {
+               return
+       }
+       finalNumberOfCollectionsAvailable, err :=
+               util.NumberItemsAvailable(params.Client, "collections")
+       if err != nil {
+               return
+       }
+       if totalCollections < finalNumberOfCollectionsAvailable {
+               err = fmt.Errorf("API server indicates a total of %d collections "+
+                               "available up to %v, but we only retrieved %d. "+
+                               "Refusing to continue as this could indicate an "+
+                               "otherwise undetected failure.",
+                               finalNumberOfCollectionsAvailable, 
+                               sdkParams["filters"].([][]string)[0][2],
+                               totalCollections)
+               return
+       }
 
        // Write the heap profile for examining memory usage
-       WriteHeapProfile()
+       err = WriteHeapProfile()
 
        return
 }
@@ -228,23 +284,30 @@ func StrCopy(s string) string {
        return string([]byte(s))
 }
 
+// ProcessCollections parses a batch of collections returned by the API
+// server into UUIDToCollection, and returns the latest modification date
+// seen together with manifest size statistics.
 func ProcessCollections(arvLogger *logger.Logger,
        receivedCollections []SdkCollectionInfo,
-       uuidToCollection map[string]Collection) (latestModificationDate time.Time) {
+       defaultReplicationLevel int,
+       UUIDToCollection map[string]Collection,
+) (
+       latestModificationDate time.Time,
+       maxManifestSize, totalManifestSize uint64,
+       err error,
+) {
        for _, sdkCollection := range receivedCollections {
-               collection := Collection{Uuid: StrCopy(sdkCollection.Uuid),
-                       OwnerUuid:         StrCopy(sdkCollection.OwnerUuid),
+               collection := Collection{UUID: StrCopy(sdkCollection.UUID),
+                       OwnerUUID:         StrCopy(sdkCollection.OwnerUUID),
                        ReplicationLevel:  sdkCollection.Redundancy,
                        BlockDigestToSize: make(map[blockdigest.BlockDigest]int)}
 
                if sdkCollection.ModifiedAt.IsZero() {
-                       loggerutil.FatalWithMessage(arvLogger,
-                               fmt.Sprintf(
-                                       "Arvados SDK collection returned with unexpected zero "+
-                                               "modifcation date. This probably means that either we failed to "+
-                                               "parse the modification date or the API server has changed how "+
-                                               "it returns modification dates: %+v",
-                                       collection))
+                       err = fmt.Errorf(
+                               "Arvados SDK collection returned with unexpected zero "+
+                                       "modification date. This probably means that either we failed to "+
+                                       "parse the modification date or the API server has changed how "+
+                                       "it returns modification dates: %+v",
+                               collection)
+                       return
                }
 
                if sdkCollection.ModifiedAt.After(latestModificationDate) {
@@ -252,13 +315,13 @@ func ProcessCollections(arvLogger *logger.Logger,
                }
 
                if collection.ReplicationLevel == 0 {
-                       collection.ReplicationLevel = DefaultReplicationLevel
+                       collection.ReplicationLevel = defaultReplicationLevel
                }
 
-               manifest := manifest.Manifest{sdkCollection.ManifestText}
+               manifest := manifest.Manifest{Text: sdkCollection.ManifestText}
                manifestSize := uint64(len(sdkCollection.ManifestText))
 
-               if _, alreadySeen := uuidToCollection[collection.Uuid]; !alreadySeen {
+               if _, alreadySeen := UUIDToCollection[collection.UUID]; !alreadySeen {
                        totalManifestSize += manifestSize
                }
                if manifestSize > maxManifestSize {
@@ -267,22 +330,26 @@ func ProcessCollections(arvLogger *logger.Logger,
 
                blockChannel := manifest.BlockIterWithDuplicates()
                for block := range blockChannel {
-                       if stored_size, stored := collection.BlockDigestToSize[block.Digest]; stored && stored_size != block.Size {
-                               message := fmt.Sprintf(
+                       if storedSize, stored := collection.BlockDigestToSize[block.Digest]; stored && storedSize != block.Size {
+                               log.Printf(
                                        "Collection %s contains multiple sizes (%d and %d) for block %s",
-                                       collection.Uuid,
-                                       stored_size,
+                                       collection.UUID,
+                                       storedSize,
                                        block.Size,
                                        block.Digest)
-                               loggerutil.FatalWithMessage(arvLogger, message)
                        }
                        collection.BlockDigestToSize[block.Digest] = block.Size
                }
+               if manifest.Err != nil {
+                       err = manifest.Err
+                       return
+               }
+
                collection.TotalSize = 0
                for _, size := range collection.BlockDigestToSize {
                        collection.TotalSize += size
                }
-               uuidToCollection[collection.Uuid] = collection
+               UUIDToCollection[collection.UUID] = collection
 
                // Clear out all the manifest strings that we don't need anymore.
                // These hopefully form the bulk of our memory usage.
@@ -293,21 +360,49 @@ func ProcessCollections(arvLogger *logger.Logger,
        return
 }
 
-func Summarize(readCollections *ReadCollections) {
+// Summarize builds the derived maps and indices (owner sizes, per-block
+// desired replication, and collection indices) from the collections read.
+func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) {
        readCollections.OwnerToCollectionSize = make(map[string]int)
-       readCollections.BlockToReplication = make(map[blockdigest.BlockDigest]int)
-
-       for _, coll := range readCollections.UuidToCollection {
-               readCollections.OwnerToCollectionSize[coll.OwnerUuid] =
-                       readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize
-
-               for block, _ := range coll.BlockDigestToSize {
-                       storedReplication := readCollections.BlockToReplication[block]
+       readCollections.BlockToDesiredReplication = make(map[blockdigest.DigestWithSize]int)
+       numCollections := len(readCollections.UUIDToCollection)
+       readCollections.CollectionUUIDToIndex = make(map[string]int, numCollections)
+       readCollections.CollectionIndexToUUID = make([]string, 0, numCollections)
+       readCollections.BlockToCollectionIndices = make(map[blockdigest.DigestWithSize][]int)
+
+       for _, coll := range readCollections.UUIDToCollection {
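+               // Assign this collection the next compact index so that blocks can
+               // reference it by integer rather than by UUID string.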
+               collectionIndex := len(readCollections.CollectionIndexToUUID)
+               readCollections.CollectionIndexToUUID =
+                       append(readCollections.CollectionIndexToUUID, coll.UUID)
+               readCollections.CollectionUUIDToIndex[coll.UUID] = collectionIndex
+
+               readCollections.OwnerToCollectionSize[coll.OwnerUUID] =
+                       readCollections.OwnerToCollectionSize[coll.OwnerUUID] + coll.TotalSize
+
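+               // Record which collections reference each block, and keep the highest
+               // replication level any of them requests for it.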
+               for block, size := range coll.BlockDigestToSize {
+                       locator := blockdigest.DigestWithSize{Digest: block, Size: uint32(size)}
+                       readCollections.BlockToCollectionIndices[locator] =
+                               append(readCollections.BlockToCollectionIndices[locator],
+                                       collectionIndex)
+                       storedReplication := readCollections.BlockToDesiredReplication[locator]
                        if coll.ReplicationLevel > storedReplication {
-                               readCollections.BlockToReplication[block] = coll.ReplicationLevel
+                               readCollections.BlockToDesiredReplication[locator] =
+                                       coll.ReplicationLevel
                        }
                }
        }
 
+       if arvLogger != nil {
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       collectionInfo := logger.GetOrCreateMap(p, "collection_info")
+                       // Since maps are shallow copied, we run a risk of concurrent
+                       // updates here. By copying results.OwnerToCollectionSize into
+                       // the log, we're assuming that it won't be updated.
+                       collectionInfo["owner_to_collection_size"] =
+                               readCollections.OwnerToCollectionSize
+                       collectionInfo["distinct_blocks_named"] =
+                               len(readCollections.BlockToDesiredReplication)
+               })
+       }
+
        return
 }