"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
"log"
"os"
- "runtime"
"runtime/pprof"
"time"
)
)
const (
	// DefaultReplicationLevel is the replication level assumed for a
	// collection when the API server does not report one.
	// TODO(misha): Read this value from the SDK once support is added
	// as suggested in https://arvados.org/issues/3408#note-31
	DefaultReplicationLevel = 2
)
}
// ReadCollections holds the aggregate results of reading all
// collections from the API server, plus summary indexes computed by
// Summarize.
type ReadCollections struct {
	// ReadAllCollections is true once every collection has been read.
	ReadAllCollections bool
	// UuidToCollection maps a collection uuid to its parsed contents.
	UuidToCollection map[string]Collection
	// OwnerToCollectionSize maps an owner uuid to the total size (in
	// bytes) of all collections it owns; populated by Summarize.
	OwnerToCollectionSize map[string]int
	// BlockToDesiredReplication maps each block locator to the highest
	// replication level requested by any collection containing it.
	BlockToDesiredReplication map[blockdigest.DigestWithSize]int
	// CollectionUuidToIndex and CollectionIndexToUuid map between a
	// collection's uuid and its compact integer index, so that
	// BlockToCollectionIndices can store small ints instead of uuids.
	CollectionUuidToIndex map[string]int
	CollectionIndexToUuid []string
	// BlockToCollectionIndices maps each block locator to the indices
	// of the collections that reference it.
	BlockToCollectionIndices map[blockdigest.DigestWithSize][]int
}
type GetCollectionsParams struct {
func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
results = GetCollections(params)
- results.Summarize()
-
- if params.Logger != nil {
- params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := p["collection_info"].(map[string]interface{})
- // Since maps are shallow copied, we run a risk of concurrent
- // updates here. By copying results.OwnerToCollectionSize into
- // the log, we're assuming that it won't be updated.
- collectionInfo["owner_to_collection_size"] = results.OwnerToCollectionSize
- })
- }
+ results.Summarize(params.Logger)
log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
log.Printf("Read and processed %d collections",
fieldsWanted := []string{"manifest_text",
"owner_uuid",
"uuid",
- // TODO(misha): Start using the redundancy field.
"redundancy",
"modified_at"}
if params.Logger != nil {
params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := make(map[string]interface{})
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
collectionInfo["batch_size"] = params.BatchSize
- p["collection_info"] = collectionInfo
})
}
if params.Logger != nil {
params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := p["collection_info"].(map[string]interface{})
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
collectionInfo["collections_read"] = totalCollections
collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
collectionInfo["total_manifest_size"] = totalManifestSize
}
}
- // Just in case this lowers the numbers reported in the heap profile.
- runtime.GC()
-
// Write the heap profile for examining memory usage
WriteHeapProfile()
loggerutil.FatalWithMessage(arvLogger,
fmt.Sprintf(
"Arvados SDK collection returned with unexpected zero "+
- "modifcation date. This probably means that either we failed to "+
+ "modification date. This probably means that either we failed to "+
"parse the modification date or the API server has changed how "+
"it returns modification dates: %+v",
collection))
return
}
-func (readCollections *ReadCollections) Summarize() {
+func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) {
readCollections.OwnerToCollectionSize = make(map[string]int)
- readCollections.BlockToReplication = make(map[blockdigest.BlockDigest]int)
+ readCollections.BlockToDesiredReplication = make(map[blockdigest.DigestWithSize]int)
numCollections := len(readCollections.UuidToCollection)
readCollections.CollectionUuidToIndex = make(map[string]int, numCollections)
readCollections.CollectionIndexToUuid = make([]string, 0, numCollections)
- readCollections.BlockToCollectionIndices = make(map[blockdigest.BlockDigest][]int)
+ readCollections.BlockToCollectionIndices = make(map[blockdigest.DigestWithSize][]int)
for _, coll := range readCollections.UuidToCollection {
collectionIndex := len(readCollections.CollectionIndexToUuid)
readCollections.OwnerToCollectionSize[coll.OwnerUuid] =
readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize
- for block, _ := range coll.BlockDigestToSize {
- readCollections.BlockToCollectionIndices[block] =
- append(readCollections.BlockToCollectionIndices[block], collectionIndex)
- storedReplication := readCollections.BlockToReplication[block]
+ for block, size := range coll.BlockDigestToSize {
+ locator := blockdigest.DigestWithSize{Digest: block, Size: uint32(size)}
+ readCollections.BlockToCollectionIndices[locator] =
+ append(readCollections.BlockToCollectionIndices[locator],
+ collectionIndex)
+ storedReplication := readCollections.BlockToDesiredReplication[locator]
if coll.ReplicationLevel > storedReplication {
- readCollections.BlockToReplication[block] = coll.ReplicationLevel
+ readCollections.BlockToDesiredReplication[locator] =
+ coll.ReplicationLevel
}
}
}
+ if arvLogger != nil {
+ arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
+ // Since maps are shallow copied, we run a risk of concurrent
+ // updates here. By copying results.OwnerToCollectionSize into
+ // the log, we're assuming that it won't be updated.
+ collectionInfo["owner_to_collection_size"] =
+ readCollections.OwnerToCollectionSize
+ collectionInfo["distinct_blocks_named"] =
+ len(readCollections.BlockToDesiredReplication)
+ })
+ }
+
return
}