Merge branch 'master' into 9998-unsigned_manifest
[arvados.git] / services / datamanager / datamanager_test.go
index 4dc940e5a2ac2ab1404e97d95954e4d02f95dfe6..7a8fff5c32a30d3a79926305df5db1ef81e48f6a 100644 (file)
@@ -6,19 +6,20 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
-       "git.curoverse.com/arvados.git/services/datamanager/keep"
+       "git.curoverse.com/arvados.git/services/datamanager/collection"
+       "git.curoverse.com/arvados.git/services/datamanager/summary"
        "io/ioutil"
-       "log"
        "net/http"
        "os"
        "os/exec"
+       "path"
        "regexp"
        "strings"
        "testing"
        "time"
 )
 
-var arv arvadosclient.ArvadosClient
+var arv *arvadosclient.ArvadosClient
 var keepClient *keepclient.KeepClient
 var keepServers []string
 
@@ -28,38 +29,40 @@ func SetupDataManagerTest(t *testing.T) {
        // start api and keep servers
        arvadostest.ResetEnv()
        arvadostest.StartAPI()
-       arvadostest.StartKeep()
+       arvadostest.StartKeep(2, false)
 
-       // make arvadosclient
        var err error
        arv, err = arvadosclient.MakeArvadosClient()
        if err != nil {
-               t.Fatal("Error creating arv")
+               t.Fatalf("Error making arvados client: %s", err)
        }
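+       // singlerun() is exercised with the data manager token; several helpers
+       // below temporarily switch to the admin token via switchToken().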
+       arv.ApiToken = arvadostest.DataManagerToken
 
        // keep client
        keepClient = &keepclient.KeepClient{
-               Arvados:       &arv,
-               Want_replicas: 1,
-               Using_proxy:   true,
+               Arvados:       arv,
+               Want_replicas: 2,
                Client:        &http.Client{},
        }
 
        // discover keep services
-       if err := keepClient.DiscoverKeepServers(); err != nil {
-               t.Fatal("Error discovering keep services")
+       if err = keepClient.DiscoverKeepServers(); err != nil {
+               t.Fatalf("Error discovering keep services: %s", err)
        }
+       keepServers = []string{}
        for _, host := range keepClient.LocalRoots() {
                keepServers = append(keepServers, host)
        }
 }
 
 func TearDownDataManagerTest(t *testing.T) {
-       arvadostest.StopKeep()
+       arvadostest.StopKeep(2)
        arvadostest.StopAPI()
+       summary.WriteDataTo = ""
+       collection.HeapProfileFilename = ""
 }
 
-func PutBlock(t *testing.T, data string) string {
+func putBlock(t *testing.T, data string) string {
        locator, _, err := keepClient.PutB([]byte(data))
        if err != nil {
                t.Fatalf("Error putting test data for %s %s %v", data, locator, err)
@@ -72,7 +75,7 @@ func PutBlock(t *testing.T, data string) string {
        return splits[0] + "+" + splits[1]
 }
 
-func GetBlock(t *testing.T, locator string, data string) {
+func getBlock(t *testing.T, locator string, data string) {
        reader, blocklen, _, err := keepClient.Get(locator)
        if err != nil {
                t.Fatalf("Error getting test data in setup for %s %s %v", data, locator, err)
@@ -91,7 +94,7 @@ func GetBlock(t *testing.T, locator string, data string) {
 }
 
 // Create a collection using arv-put
-func CreateCollection(t *testing.T, data string) string {
+func createCollection(t *testing.T, data string) string {
        tempfile, err := ioutil.TempFile(os.TempDir(), "temp-test-file")
        defer os.Remove(tempfile.Name())
 
@@ -110,25 +113,33 @@ func CreateCollection(t *testing.T, data string) string {
        return uuid
 }
 
-// Get collection using arv-get
+// Get collection locator
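+// locatorMatcher captures the 32-hex-digit hash, the size, and any trailing hints
+// from a locator such as "fa7aeb5140e2848d39b416daeef4ffc5+45".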
 var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)
 
-func GetCollectionManifest(t *testing.T, uuid string) string {
-       output, err := exec.Command("arv-get", uuid).Output()
-       if err != nil {
-               t.Fatalf("Error during arv-get %s", err)
-       }
+func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
+       manifest := getCollection(t, uuid)["manifest_text"].(string)
 
-       locator := strings.Split(string(output), " ")[1]
+       locator := strings.Split(manifest, " ")[1]
        match := locatorMatcher.FindStringSubmatch(locator)
        if match == nil {
-               t.Fatalf("No locator found in collection manifest %s", string(output))
+               t.Fatalf("No locator found in collection manifest %s", manifest)
        }
 
        return match[1] + "+" + match[2]
 }
 
-func GetCollection(t *testing.T, uuid string) Dict {
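+// switchToken sets arv.ApiToken to t and returns a function that restores the
+// original token. Typical use: defer switchToken(arvadostest.AdminToken)()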
+func switchToken(t string) func() {
+       orig := arv.ApiToken
+       restore := func() {
+               arv.ApiToken = orig
+       }
+       arv.ApiToken = t
+       return restore
+}
+
+func getCollection(t *testing.T, uuid string) Dict {
+       defer switchToken(arvadostest.AdminToken)()
+
        getback := make(Dict)
        err := arv.Get("collections", uuid, nil, &getback)
        if err != nil {
@@ -141,7 +152,9 @@ func GetCollection(t *testing.T, uuid string) Dict {
        return getback
 }
 
-func UpdateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
+func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
+       defer switchToken(arvadostest.AdminToken)()
+
        err := arv.Update("collections", uuid, arvadosclient.Dict{
                "collection": arvadosclient.Dict{
                        paramName: paramValue,
@@ -155,7 +168,9 @@ func UpdateCollection(t *testing.T, uuid string, paramName string, paramValue st
 
 type Dict map[string]interface{}
 
-func DeleteCollection(t *testing.T, uuid string) {
+func deleteCollection(t *testing.T, uuid string) {
+       defer switchToken(arvadostest.AdminToken)()
+
        getback := make(Dict)
        err := arv.Delete("collections", uuid, nil, &getback)
        if err != nil {
@@ -166,20 +181,20 @@ func DeleteCollection(t *testing.T, uuid string) {
        }
 }
 
-func DataManagerSingleRun(t *testing.T) {
-       err := singlerun()
+func dataManagerSingleRun(t *testing.T) {
+       err := singlerun(arv)
        if err != nil {
                t.Fatalf("Error during singlerun %s", err)
        }
 }
 
-func GetBlockIndexesForServer(t *testing.T, i int) []string {
+func getBlockIndexesForServer(t *testing.T, i int) []string {
        var indexes []string
 
        path := keepServers[i] + "/index"
        client := http.Client{}
        req, err := http.NewRequest("GET", path, nil)
-       req.Header.Add("Authorization", "OAuth2 "+keep.GetDataManagerToken(nil))
+       req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
        req.Header.Add("Content-Type", "application/octet-stream")
        resp, err := client.Do(req)
        defer resp.Body.Close()
@@ -201,32 +216,40 @@ func GetBlockIndexesForServer(t *testing.T, i int) []string {
        return indexes
 }
 
-func GetBlockIndexes(t *testing.T) []string {
-       var indexes []string
+func getBlockIndexes(t *testing.T) [][]string {
+       var indexes [][]string
 
        for i := 0; i < len(keepServers); i++ {
-               indexes = append(indexes, GetBlockIndexesForServer(t, i)...)
+               indexes = append(indexes, getBlockIndexesForServer(t, i))
        }
        return indexes
 }
 
-func VerifyBlocks(t *testing.T, notExpected []string, expected []string) {
-       blocks := GetBlockIndexes(t)
+func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
+       blocks := getBlockIndexes(t)
+
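+       // blocks[i] is the index reported by keepServers[i]: a block in notExpected
+       // must not appear on any server, and a block in expected must appear on at
+       // least minReplication servers.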
        for _, block := range notExpected {
-               exists := ValueInArray(block, blocks)
-               if exists {
-                       t.Fatalf("Found unexpected block in index %s", block)
+               for _, idx := range blocks {
+                       if valueInArray(block, idx) {
+                               t.Fatalf("Found unexpected block %s", block)
+                       }
                }
        }
+
        for _, block := range expected {
-               exists := ValueInArray(block, blocks)
-               if !exists {
-                       t.Fatalf("Did not find expected block in index %s", block)
+               nFound := 0
+               for _, idx := range blocks {
+                       if valueInArray(block, idx) {
+                               nFound++
+                       }
+               }
+               if nFound < minReplication {
+                       t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
                }
        }
 }
 
-func ValueInArray(value string, list []string) bool {
+func valueInArray(value string, list []string) bool {
        for _, v := range list {
                if value == v {
                        return true
@@ -235,17 +258,14 @@ func ValueInArray(value string, list []string) bool {
        return false
 }
 
-/*
-Test env uses two keep volumes. The volume names can be found by reading the files
-  ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
-
-The keep volumes are of the dir structure:
-  volumeN/subdir/locator
-*/
-func BackdateBlocks(t *testing.T, oldBlockLocators []string) {
+// Test env uses two keep volumes. The volume names can be found by reading the files
+// ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
+//
+// The keep volumes are of the dir structure: volumeN/subdir/locator
+func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
        // First get rid of any size hints in the locators
        var trimmedBlockLocators []string
-       for _, block := range oldBlockLocators {
+       for _, block := range oldUnusedBlockLocators {
                trimmedBlockLocators = append(trimmedBlockLocators, strings.Split(block, "+")[0])
        }
 
@@ -282,7 +302,7 @@ func BackdateBlocks(t *testing.T, oldBlockLocators []string) {
                        for _, fileInfo := range subdirContents {
                                blockName := fileInfo.Name()
                                myname := fmt.Sprintf("%s/%s", subdirName, blockName)
-                               if ValueInArray(blockName, trimmedBlockLocators) {
+                               if valueInArray(blockName, trimmedBlockLocators) {
                                        err = os.Chtimes(myname, oldTime, oldTime)
                                }
                        }
@@ -290,17 +310,16 @@ func BackdateBlocks(t *testing.T, oldBlockLocators []string) {
        }
 }
 
-func GetStatus(t *testing.T, path string) interface{} {
+func getStatus(t *testing.T, path string) interface{} {
        client := http.Client{}
        req, err := http.NewRequest("GET", path, nil)
-       req.Header.Add("Authorization", "OAuth2 "+keep.GetDataManagerToken(nil))
+       req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
        req.Header.Add("Content-Type", "application/octet-stream")
        resp, err := client.Do(req)
-       defer resp.Body.Close()
-
        if err != nil {
                t.Fatalf("Error during %s %s", path, err)
        }
+       defer resp.Body.Close()
 
        var s interface{}
        json.NewDecoder(resp.Body).Decode(&s)
@@ -308,192 +327,171 @@ func GetStatus(t *testing.T, path string) interface{} {
        return s
 }
 
-func WaitUntilQueuesFinishWork(t *testing.T) {
-       // Wait until PullQueue and TrashQueue finish their work
-       for {
-               var done [2]bool
-               for i := 0; i < 2; i++ {
-                       s := GetStatus(t, keepServers[i]+"/status.json")
-                       var pullQueueStatus interface{}
-                       pullQueueStatus = s.(map[string]interface{})["PullQueue"]
-                       var trashQueueStatus interface{}
-                       trashQueueStatus = s.(map[string]interface{})["TrashQueue"]
-
-                       if pullQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
-                               pullQueueStatus.(map[string]interface{})["InProgress"] == float64(0) &&
-                               trashQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
-                               trashQueueStatus.(map[string]interface{})["InProgress"] == float64(0) {
-                               done[i] = true
+// Wait until PullQueue and TrashQueue are empty on all keepServers.
+func waitUntilQueuesFinishWork(t *testing.T) {
+       for _, ks := range keepServers {
+               for done := false; !done; {
+                       time.Sleep(100 * time.Millisecond)
+                       s := getStatus(t, ks+"/status.json")
+                       // Consider this server done only when both its queues are empty.
+                       done = true
+                       for _, qName := range []string{"PullQueue", "TrashQueue"} {
+                               qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
+                               if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) > 0 {
+                                       done = false
+                               }
                        }
                }
-               if done[0] && done[1] {
-                       break
-               } else {
-                       time.Sleep(1 * time.Second)
-               }
        }
 }
 
-/*
-Create some blocks and backdate some of them.
-Also create some collections and delete some of them.
-Verify block indexes.
-*/
+// Create some blocks and backdate some of them.
+// Also create some collections and delete some of them.
+// Verify block indexes.
 func TestPutAndGetBlocks(t *testing.T) {
-       log.Print("TestPutAndGetBlocks start")
        defer TearDownDataManagerTest(t)
        SetupDataManagerTest(t)
 
        // Put some blocks which will be backdated later on
-       // The first one will also be used in a collection and hence should not be deleted when datamanager runs.
-       // The rest will be old and unreferenced and hence should be deleted when datamanager runs.
+       // These blocks will remain unreferenced by any collection and hence should be deleted when datamanager runs.
-       var oldBlockLocators []string
-       oldBlockData := "this block will have older mtime"
+       var oldUnusedBlockLocators []string
+       oldUnusedBlockData := "this block will have older mtime"
        for i := 0; i < 5; i++ {
-               oldBlockLocators = append(oldBlockLocators, PutBlock(t, fmt.Sprintf("%s%d", oldBlockData, i)))
+               oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
        }
        for i := 0; i < 5; i++ {
-               GetBlock(t, oldBlockLocators[i], fmt.Sprintf("%s%d", oldBlockData, i))
+               getBlock(t, oldUnusedBlockLocators[i], fmt.Sprintf("%s%d", oldUnusedBlockData, i))
        }
 
+       // This block will also be referenced by a collection and hence should not be deleted when datamanager runs.
+       oldUsedBlockData := "this collection block will have older mtime"
+       oldUsedBlockLocator := putBlock(t, oldUsedBlockData)
+       getBlock(t, oldUsedBlockLocator, oldUsedBlockData)
+
        // Put some more blocks which will not be backdated; hence they are still new, but not in any collection.
        // Hence, even though unreferenced, these should not be deleted when datamanager runs.
        var newBlockLocators []string
        newBlockData := "this block is newer"
        for i := 0; i < 5; i++ {
-               newBlockLocators = append(newBlockLocators, PutBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
+               newBlockLocators = append(newBlockLocators, putBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
        }
        for i := 0; i < 5; i++ {
-               GetBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
+               getBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
        }
 
        // Create a collection that would be deleted later on
-       toBeDeletedCollectionUuid := CreateCollection(t, "some data for collection creation")
-       toBeDeletedCollectionLocator := GetCollectionManifest(t, toBeDeletedCollectionUuid)
+       toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
+       toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)
 
        // Create another collection that has the same data as the one of the old blocks
-       oldBlockCollectionUuid := CreateCollection(t, "this block will have older mtime0")
-       oldBlockCollectionLocator := GetCollectionManifest(t, oldBlockCollectionUuid)
-       exists := ValueInArray(strings.Split(oldBlockCollectionLocator, "+")[0], oldBlockLocators)
-       if exists {
-               t.Fatalf("Locator of the collection with the same data as old block is different %s", oldBlockCollectionLocator)
+       oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
+       oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
+       if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
+               t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
        }
 
        // Create another collection whose replication level will be changed
-       replicationCollectionUuid := CreateCollection(t, "replication level on this collection will be reduced")
-       replicationCollectionLocator := GetCollectionManifest(t, replicationCollectionUuid)
+       replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
+       replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)
 
        // Create two collections with same data; one will be deleted later on
        dataForTwoCollections := "one of these collections will be deleted"
-       oneOfTwoWithSameDataUuid := CreateCollection(t, dataForTwoCollections)
-       oneOfTwoWithSameDataLocator := GetCollectionManifest(t, oneOfTwoWithSameDataUuid)
-       secondOfTwoWithSameDataUuid := CreateCollection(t, dataForTwoCollections)
-       secondOfTwoWithSameDataLocator := GetCollectionManifest(t, secondOfTwoWithSameDataUuid)
+       oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+       oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
+       secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+       secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
        if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
                t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
        }
 
+       // create the empty block and a collection whose manifest references it
+       emptyBlockLocator := putBlock(t, "")
+       emptyCollection := createCollection(t, "")
+
        // Verify blocks before doing any backdating / deleting.
        var expected []string
-       expected = append(expected, oldBlockLocators...)
+       expected = append(expected, oldUnusedBlockLocators...)
        expected = append(expected, newBlockLocators...)
        expected = append(expected, toBeDeletedCollectionLocator)
        expected = append(expected, replicationCollectionLocator)
        expected = append(expected, oneOfTwoWithSameDataLocator)
        expected = append(expected, secondOfTwoWithSameDataLocator)
+       expected = append(expected, emptyBlockLocator)
 
-       VerifyBlocks(t, nil, expected)
+       verifyBlocks(t, nil, expected, 2)
 
        // Run datamanager in singlerun mode
-       DataManagerSingleRun(t)
-       WaitUntilQueuesFinishWork(t)
+       dataManagerSingleRun(t)
+       waitUntilQueuesFinishWork(t)
 
-       log.Print("Backdating blocks and deleting collection now")
+       verifyBlocks(t, nil, expected, 2)
 
        // Backdate the to-be old blocks and delete the collections
-       BackdateBlocks(t, oldBlockLocators)
-       DeleteCollection(t, toBeDeletedCollectionUuid)
-       DeleteCollection(t, secondOfTwoWithSameDataUuid)
+       backdateBlocks(t, oldUnusedBlockLocators)
+       deleteCollection(t, toBeDeletedCollectionUUID)
+       deleteCollection(t, secondOfTwoWithSameDataUUID)
+       backdateBlocks(t, []string{emptyBlockLocator})
+       deleteCollection(t, emptyCollection)
 
        // Run data manager again
-       time.Sleep(1 * time.Second)
-       DataManagerSingleRun(t)
-       WaitUntilQueuesFinishWork(t)
-
-       // Get block indexes and verify that all backdated blocks except the first one are not included.
-       // The first block was also used in a collection that is not deleted and hence should remain.
-       var notExpected []string
-       notExpected = append(notExpected, oldBlockLocators[1:]...)
+       dataManagerSingleRun(t)
+       waitUntilQueuesFinishWork(t)
 
+       // Get block indexes and verify that all backdated blocks, except the one referenced by a collection, are gone.
        expected = expected[:0]
-       expected = append(expected, oldBlockLocators[0])
+       expected = append(expected, oldUsedBlockLocator)
        expected = append(expected, newBlockLocators...)
        expected = append(expected, toBeDeletedCollectionLocator)
-       expected = append(expected, replicationCollectionLocator)
        expected = append(expected, oneOfTwoWithSameDataLocator)
        expected = append(expected, secondOfTwoWithSameDataLocator)
+       expected = append(expected, emptyBlockLocator) // even when unreferenced, this remains
 
-       VerifyBlocks(t, notExpected, expected)
+       verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 
-       // Reduce replication on replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.
-
-       // Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
-       for i := 0; i < len(keepServers); i++ {
-               indexes := GetBlockIndexesForServer(t, i)
-               if !ValueInArray(replicationCollectionLocator, indexes) {
-                       t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-               }
-       }
+       // Reduce desired replication on replicationCollectionUUID
+       // collection, and verify that Data Manager does not reduce
+       // actual replication any further than that. (It might not
+       // reduce actual replication at all; that's OK for this test.)
 
-       // Now reduce replication level on this collection and verify that it still appears in both volumes
-       UpdateCollection(t, replicationCollectionUuid, "replication_desired", "1")
-       collection := GetCollection(t, replicationCollectionUuid)
+       // Reduce desired replication level.
+       updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
+       collection := getCollection(t, replicationCollectionUUID)
        if collection["replication_desired"].(interface{}) != float64(1) {
                t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
        }
 
-       // Run data manager again
-       time.Sleep(1 * time.Second)
-       DataManagerSingleRun(t)
-       WaitUntilQueuesFinishWork(t)
+       // Verify data is currently overreplicated.
+       verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)
 
-       for i := 0; i < len(keepServers); i++ {
-               indexes := GetBlockIndexesForServer(t, i)
-               if !ValueInArray(replicationCollectionLocator, indexes) {
-                       t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-               }
-       }
+       // Run data manager again
+       dataManagerSingleRun(t)
+       waitUntilQueuesFinishWork(t)
 
-       // Done testing reduce replication on collection
+       // Verify data is not underreplicated.
+       verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)
 
-       // Verify blocks one more time
-       VerifyBlocks(t, notExpected, expected)
+       // Verify *other* collections' data is not underreplicated.
+       verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 }
 
 func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
-       log.Print("TestDatamanagerSingleRunRepeatedly start")
-
        defer TearDownDataManagerTest(t)
        SetupDataManagerTest(t)
 
        for i := 0; i < 10; i++ {
-               err := singlerun()
+               err := singlerun(arv)
                if err != nil {
                        t.Fatalf("Got an error during datamanager singlerun: %v", err)
                }
-               time.Sleep(1 * time.Second)
        }
 }
 
-func _TestGetStatusRepeatedly(t *testing.T) {
-       log.Print("TestGetStatusRepeatedly start")
-
+func TestGetStatusRepeatedly(t *testing.T) {
        defer TearDownDataManagerTest(t)
        SetupDataManagerTest(t)
 
        for i := 0; i < 10; i++ {
                for j := 0; j < 2; j++ {
-                       s := GetStatus(t, keepServers[j]+"/status.json")
+                       s := getStatus(t, keepServers[j]+"/status.json")
 
                        var pullQueueStatus interface{}
                        pullQueueStatus = s.(map[string]interface{})["PullQueue"]
@@ -507,7 +505,228 @@ func _TestGetStatusRepeatedly(t *testing.T) {
                                t.Fatalf("PullQueue and TrashQueue status not found")
                        }
 
-                       time.Sleep(1 * time.Second)
+                       time.Sleep(100 * time.Millisecond)
+               }
+       }
+}
+
+func TestRunDatamanagerWithBogusServer(t *testing.T) {
+       defer TearDownDataManagerTest(t)
+       SetupDataManagerTest(t)
+
+       arv.ApiServer = "bogus-server"
+
+       err := singlerun(arv)
+       if err == nil {
+               t.Fatalf("Expected error during singlerun with bogus server")
+       }
+}
+
+func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
+       defer TearDownDataManagerTest(t)
+       SetupDataManagerTest(t)
+
+       arv.ApiToken = arvadostest.ActiveToken
+
+       err := singlerun(arv)
+       if err == nil {
+               t.Fatalf("Expected error during singlerun as non-admin user")
+       }
+}
+
+func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
+       testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
+       badpath, err := arvadostest.CreateBadPath()
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer func() {
+               err = arvadostest.DestroyBadPath(badpath)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }()
+       testOldBlocksNotDeletedOnDataManagerError(t, path.Join(badpath, "writetofile"), "", true, true)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
+       badpath, err := arvadostest.CreateBadPath()
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer func() {
+               err = arvadostest.DestroyBadPath(badpath)
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }()
+       testOldBlocksNotDeletedOnDataManagerError(t, "", path.Join(badpath, "heapprofilefile"), true, true)
+}
+
+// Create some blocks and backdate them.
+// Run datamanager while (optionally) producing an error condition.
+// Verify that the backdated blocks are deleted, or preserved, as expected.
+func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
+       defer TearDownDataManagerTest(t)
+       SetupDataManagerTest(t)
+
+       // Put some blocks and backdate them.
+       var oldUnusedBlockLocators []string
+       oldUnusedBlockData := "this block will have older mtime"
+       for i := 0; i < 5; i++ {
+               oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
+       }
+       backdateBlocks(t, oldUnusedBlockLocators)
+
+       // Run data manager
+       summary.WriteDataTo = writeDataTo
+       collection.HeapProfileFilename = heapProfileFile
+
+       err := singlerun(arv)
+       if !expectError {
+               if err != nil {
+                       t.Fatalf("Got an error during datamanager singlerun: %v", err)
+               }
+       } else {
+               if err == nil {
+                       t.Fatalf("Expected error during datamanager singlerun")
+               }
+       }
+       waitUntilQueuesFinishWork(t)
+
+       // Get block indexes and verify that the backdated blocks were deleted, or preserved, as expected.
+       if expectOldBlocks {
+               verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
+       } else {
+               verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
+       }
+}
+
+// Create a collection with multiple streams and blocks
+func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
+       defer switchToken(arvadostest.AdminToken)()
+
+       manifest := ""
+       locators := make(map[string]bool)
+       for s := 0; s < numStreams; s++ {
+               manifest += fmt.Sprintf("./stream%d ", s)
+               for b := 0; b < numBlocks; b++ {
+                       locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
+                       if err != nil {
+                               t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
+                       }
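+                       // Record the locator as "hash+size", dropping the "+A..."
+                       // permission signature returned by PutB.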
+                       locators[strings.Split(locator, "+A")[0]] = true
+                       manifest += locator + " "
                }
+               manifest += "0:1:dummyfile.txt\n"
+       }
+
+       collection := make(Dict)
+       err := arv.Create("collections",
+               arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
+               &collection)
+
+       if err != nil {
+               t.Fatalf("Error creating collection %v", err)
        }
+
+       var locs []string
+       for k := range locators {
+               locs = append(locs, k)
+       }
+
+       return collection["uuid"].(string), locs
+}
+
+// Create a collection with multiple streams and blocks; backdate the blocks but do not delete the collection.
+// Also, create a stray block and backdate it.
+// After datamanager run: expect blocks from the collection, but not the stray block.
+func TestManifestWithMultipleStreamsAndBlocks(t *testing.T) {
+       testManifestWithMultipleStreamsAndBlocks(t, 100, 10, "", false)
+}
+
+// Same test as TestManifestWithMultipleStreamsAndBlocks, but with an additional
+// keepstore of a service type other than "disk". Only "disk" type services are
+// indexed by datamanager, so the outcome should be the same.
+func TestManifestWithMultipleStreamsAndBlocks_WithOneUnsupportedKeepServer(t *testing.T) {
+       testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "testblobstore", false)
+}
+
+// Test datamanager with dry-run. Expect no block to be deleted.
+func TestManifestWithMultipleStreamsAndBlocks_DryRun(t *testing.T) {
+       testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "", true)
+}
+
+func testManifestWithMultipleStreamsAndBlocks(t *testing.T, numStreams, numBlocks int, createExtraKeepServerWithType string, isDryRun bool) {
+       defer TearDownDataManagerTest(t)
+       SetupDataManagerTest(t)
+
+       // create collection whose blocks will be backdated
+       collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", numStreams, numBlocks)
+       if collectionWithOldBlocks == "" {
+               t.Fatalf("Failed to create collection with %d blocks", numStreams*numBlocks)
+       }
+       if len(oldBlocks) != numStreams*numBlocks {
+               t.Fatalf("Not all blocks were created: expected %v, found %v", numStreams*numBlocks, len(oldBlocks))
+       }
+
+       // create a stray block that will be backdated
+       strayOldBlock := putBlock(t, "this stray block is old")
+
+       expected := []string{strayOldBlock}
+       expected = append(expected, oldBlocks...)
+       verifyBlocks(t, nil, expected, 2)
+
+       // Backdate the old blocks; the collection still references them
+       backdateBlocks(t, oldBlocks)
+
+       // also backdate the stray old block
+       backdateBlocks(t, []string{strayOldBlock})
+
+       // If requested, create an extra keepserver with the given type
+       // This should be ignored during indexing and hence not change the datamanager outcome
+       var extraKeepServerUUID string
+       if createExtraKeepServerWithType != "" {
+               extraKeepServerUUID = addExtraKeepServer(t, createExtraKeepServerWithType)
+               defer deleteExtraKeepServer(extraKeepServerUUID)
+       }
+
+       // run datamanager
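+       // dryRun is datamanager's package-level dry-run flag; when set, this run is
+       // expected to leave all blocks in place (verified below).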
+       dryRun = isDryRun
+       dataManagerSingleRun(t)
+
+       if dryRun {
+               // verify that all blocks, including strayOldBlock, are still to be found
+               verifyBlocks(t, nil, expected, 2)
+       } else {
+               // verify that strayOldBlock is gone, but the collection's blocks are still there
+               verifyBlocks(t, []string{strayOldBlock}, oldBlocks, 2)
+       }
+}
+
+// Add one more keepstore with the given service type
+func addExtraKeepServer(t *testing.T, serviceType string) string {
+       defer switchToken(arvadostest.AdminToken)()
+
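+       // Only an API record is created here; no keepstore actually listens on this
+       // port. That is enough for this test, since datamanager should skip
+       // non-"disk" services when indexing.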
+       extraKeepService := make(arvadosclient.Dict)
+       err := arv.Create("keep_services",
+               arvadosclient.Dict{"keep_service": arvadosclient.Dict{
+                       "service_host":     "localhost",
+                       "service_port":     "21321",
+                       "service_ssl_flag": false,
+                       "service_type":     serviceType}},
+               &extraKeepService)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       return extraKeepService["uuid"].(string)
+}
+
+func deleteExtraKeepServer(uuid string) {
+       defer switchToken(arvadostest.AdminToken)()
+       arv.Delete("keep_services", uuid, nil, nil)
 }