7255: improve createMultiStreamBlockCollection to create collection with multiple...
[arvados.git] / services / datamanager / datamanager_test.go
index c76d481f2362269f0d85c2242cd09c4fd61a76cd..094cd44b0f2b0d7dcb714554587f762519f82bf5 100644 (file)
@@ -16,11 +16,6 @@ import (
        "time"
 )
 
-const (
-       ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
-       AdminToken = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
-)
-
 var arv arvadosclient.ArvadosClient
 var keepClient *keepclient.KeepClient
 var keepServers []string
@@ -31,9 +26,10 @@ func SetupDataManagerTest(t *testing.T) {
        // start api and keep servers
        arvadostest.ResetEnv()
        arvadostest.StartAPI()
-       arvadostest.StartKeep()
+       arvadostest.StartKeep(2, false)
 
        arv = makeArvadosClient()
+       arv.ApiToken = arvadostest.DataManagerToken
 
        // keep client
        keepClient = &keepclient.KeepClient{
@@ -47,13 +43,14 @@ func SetupDataManagerTest(t *testing.T) {
        if err := keepClient.DiscoverKeepServers(); err != nil {
                t.Fatalf("Error discovering keep services: %s", err)
        }
+       keepServers = []string{}
        for _, host := range keepClient.LocalRoots() {
                keepServers = append(keepServers, host)
        }
 }
 
 func TearDownDataManagerTest(t *testing.T) {
-       arvadostest.StopKeep()
+       arvadostest.StopKeep(2)
        arvadostest.StopAPI()
 }
 
@@ -123,7 +120,18 @@ func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
        return match[1] + "+" + match[2]
 }
 
+func switchToken(t string) func() {
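+       // Install token t on the global Arvados client and return a closure
+       // that restores the previous token; typically used as
+       // "defer switchToken(tok)()".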
+       orig := arv.ApiToken
+       restore := func() {
+               arv.ApiToken = orig
+       }
+       arv.ApiToken = t
+       return restore
+}
+
 func getCollection(t *testing.T, uuid string) Dict {
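+       // Collection reads in these tests are done as the admin user;
+       // switchToken restores the caller's token when this helper returns.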
+       defer switchToken(arvadostest.AdminToken)()
+
        getback := make(Dict)
        err := arv.Get("collections", uuid, nil, &getback)
        if err != nil {
@@ -137,6 +145,8 @@ func getCollection(t *testing.T, uuid string) Dict {
 }
 
 func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
+       defer switchToken(arvadostest.AdminToken)()
+
        err := arv.Update("collections", uuid, arvadosclient.Dict{
                "collection": arvadosclient.Dict{
                        paramName: paramValue,
@@ -151,6 +161,8 @@ func updateCollection(t *testing.T, uuid string, paramName string, paramValue st
 type Dict map[string]interface{}
 
 func deleteCollection(t *testing.T, uuid string) {
+       defer switchToken(arvadostest.AdminToken)()
+
        getback := make(Dict)
        err := arv.Delete("collections", uuid, nil, &getback)
        if err != nil {
@@ -174,7 +186,7 @@ func getBlockIndexesForServer(t *testing.T, i int) []string {
        path := keepServers[i] + "/index"
        client := http.Client{}
        req, err := http.NewRequest("GET", path, nil)
-       req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+       req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
        req.Header.Add("Content-Type", "application/octet-stream")
        resp, err := client.Do(req)
        defer resp.Body.Close()
@@ -205,7 +217,7 @@ func getBlockIndexes(t *testing.T) [][]string {
        return indexes
 }
 
-func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
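+// verifyBlocks asserts that no locator in notExpected appears in any keep
+// server's block index, and that every locator in expected is present on at
+// least minReplication servers.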
+func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
        blocks := getBlockIndexes(t)
 
        for _, block := range notExpected {
@@ -223,8 +235,8 @@ func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
                                nFound++
                        }
                }
-               if nFound < 2 {
-                       t.Fatalf("Found %d replicas of block %s, expected >= 2", nFound, block)
+               if nFound < minReplication {
+                       t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
                }
        }
 }
@@ -296,14 +308,13 @@ func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
 func getStatus(t *testing.T, path string) interface{} {
        client := http.Client{}
        req, err := http.NewRequest("GET", path, nil)
-       req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+       req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
        req.Header.Add("Content-Type", "application/octet-stream")
        resp, err := client.Do(req)
-       defer resp.Body.Close()
-
        if err != nil {
                t.Fatalf("Error during %s %s", path, err)
        }
+       defer resp.Body.Close()
 
        var s interface{}
        json.NewDecoder(resp.Body).Decode(&s)
@@ -311,28 +322,18 @@ func getStatus(t *testing.T, path string) interface{} {
        return s
 }
 
+// Wait until PullQueue and TrashQueue are empty on all keepServers.
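+// Polls each server's /status.json at 100ms intervals.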
 func waitUntilQueuesFinishWork(t *testing.T) {
-       // Wait until PullQueue and TrashQueue finish their work
-       for {
-               var done [2]bool
-               for i := 0; i < 2; i++ {
-                       s := getStatus(t, keepServers[i]+"/status.json")
-                       var pullQueueStatus interface{}
-                       pullQueueStatus = s.(map[string]interface{})["PullQueue"]
-                       var trashQueueStatus interface{}
-                       trashQueueStatus = s.(map[string]interface{})["TrashQueue"]
-
-                       if pullQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
-                               pullQueueStatus.(map[string]interface{})["InProgress"] == float64(0) &&
-                               trashQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
-                               trashQueueStatus.(map[string]interface{})["InProgress"] == float64(0) {
-                               done[i] = true
-                       }
-               }
-               if done[0] && done[1] {
-                       break
-               } else {
+       for _, ks := range keepServers {
+               for done := false; !done; {
                        time.Sleep(100 * time.Millisecond)
+                       s := getStatus(t, ks+"/status.json")
+                       for _, qName := range []string{"PullQueue", "TrashQueue"} {
+                               qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
+                               if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) == 0 {
+                                       done = true
+                               }
+                       }
                }
        }
 }
@@ -375,30 +376,34 @@ func TestPutAndGetBlocks(t *testing.T) {
        }
 
        // Create a collection that would be deleted later on
-       toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
-       toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUuid)
+       toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
+       toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)
 
        // Create another collection that has the same data as the one of the old blocks
-       oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
-       oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUuid)
+       oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
+       oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
        if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
                t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
        }
 
        // Create another collection whose replication level will be changed
-       replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
-       replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUuid)
+       replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
+       replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)
 
        // Create two collections with same data; one will be deleted later on
        dataForTwoCollections := "one of these collections will be deleted"
-       oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
-       oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUuid)
-       secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
-       secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUuid)
+       oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+       oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
+       secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+       secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
        if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
                t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
        }
 
+       // create an empty block, and a collection whose data is the empty string
+       emptyBlockLocator := putBlock(t, "")
+       emptyCollection := createCollection(t, "")
+
        // Verify blocks before doing any backdating / deleting.
        var expected []string
        expected = append(expected, oldUnusedBlockLocators...)
@@ -407,19 +412,22 @@ func TestPutAndGetBlocks(t *testing.T) {
        expected = append(expected, replicationCollectionLocator)
        expected = append(expected, oneOfTwoWithSameDataLocator)
        expected = append(expected, secondOfTwoWithSameDataLocator)
+       expected = append(expected, emptyBlockLocator)
 
-       verifyBlocks(t, nil, expected)
+       verifyBlocks(t, nil, expected, 2)
 
        // Run datamanager in singlerun mode
        dataManagerSingleRun(t)
        waitUntilQueuesFinishWork(t)
 
-       verifyBlocks(t, nil, expected)
+       verifyBlocks(t, nil, expected, 2)
 
        // Backdate the to-be old blocks and delete the collections
        backdateBlocks(t, oldUnusedBlockLocators)
-       deleteCollection(t, toBeDeletedCollectionUuid)
-       deleteCollection(t, secondOfTwoWithSameDataUuid)
+       deleteCollection(t, toBeDeletedCollectionUUID)
+       deleteCollection(t, secondOfTwoWithSameDataUUID)
+       backdateBlocks(t, []string{emptyBlockLocator})
+       deleteCollection(t, emptyCollection)
 
        // Run data manager again
        dataManagerSingleRun(t)
@@ -430,44 +438,36 @@ func TestPutAndGetBlocks(t *testing.T) {
        expected = append(expected, oldUsedBlockLocator)
        expected = append(expected, newBlockLocators...)
        expected = append(expected, toBeDeletedCollectionLocator)
-       expected = append(expected, replicationCollectionLocator)
        expected = append(expected, oneOfTwoWithSameDataLocator)
        expected = append(expected, secondOfTwoWithSameDataLocator)
+       expected = append(expected, emptyBlockLocator) // even when unreferenced, this remains
 
-       verifyBlocks(t, oldUnusedBlockLocators, expected)
-
-       // Reduce replication on replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.
+       verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 
-       // Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
-       for i := 0; i < len(keepServers); i++ {
-               indexes := getBlockIndexesForServer(t, i)
-               if !valueInArray(replicationCollectionLocator, indexes) {
-                       t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-               }
-       }
+       // Reduce desired replication on replicationCollectionUUID
+       // collection, and verify that Data Manager does not reduce
+       // actual replication any further than that. (It might not
+       // reduce actual replication at all; that's OK for this test.)
 
-       // Now reduce replication level on this collection and verify that it still appears in both volumes
-       updateCollection(t, replicationCollectionUuid, "replication_desired", "1")
-       collection := getCollection(t, replicationCollectionUuid)
+       // Reduce desired replication level.
+       updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
+       collection := getCollection(t, replicationCollectionUUID)
        if collection["replication_desired"].(interface{}) != float64(1) {
                t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
        }
 
+       // Verify data is currently overreplicated.
+       verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)
+
        // Run data manager again
-       time.Sleep(100 * time.Millisecond)
        dataManagerSingleRun(t)
        waitUntilQueuesFinishWork(t)
 
-       for i := 0; i < len(keepServers); i++ {
-               indexes := getBlockIndexesForServer(t, i)
-               if !valueInArray(replicationCollectionLocator, indexes) {
-                       t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-               }
-       }
-       // Done testing reduce replication on collection
+       // Verify data is not underreplicated.
+       verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)
 
-       // Verify blocks one more time
-       verifyBlocks(t, oldUnusedBlockLocators, expected)
+       // Verify *other* collections' data is not underreplicated.
+       verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 }
 
 func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
@@ -479,13 +479,10 @@ func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
                if err != nil {
                        t.Fatalf("Got an error during datamanager singlerun: %v", err)
                }
-               time.Sleep(100 * time.Millisecond)
        }
 }
 
 func TestGetStatusRepeatedly(t *testing.T) {
-       t.Skip("This test still fails. Skip it until it is fixed.")
-
        defer TearDownDataManagerTest(t)
        SetupDataManagerTest(t)
 
@@ -526,10 +523,100 @@ func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
        defer TearDownDataManagerTest(t)
        SetupDataManagerTest(t)
 
-       arv.ApiToken = ActiveUserToken
+       arv.ApiToken = arvadostest.ActiveToken
 
        err := singlerun(arv)
        if err == nil {
                t.Fatalf("Expected error during singlerun as non-admin user")
        }
 }
+
+// Create a collection with multiple streams and blocks
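+// Returns the new collection's UUID and the locators (with permission hints
+// stripped) of every block written.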
+func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
+       defer switchToken(arvadostest.AdminToken)()
+
+       manifest := ""
+       var locators []string
+       for s := 0; s < numStreams; s++ {
+               manifest += fmt.Sprintf("./stream%d ", s)
+               for b := 0; b < numBlocks; b++ {
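+                       // Give each block distinct content so every PutB call
+                       // yields a unique locator.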
+                       locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
+                       if err != nil {
+                               t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
+                       }
+                       locators = append(locators, strings.Split(locator, "+A")[0])
+                       manifest += locator + " "
+               }
+               manifest += "0:1:dummyfile.txt\n"
+       }
+
+       collection := make(Dict)
+       err := arv.Create("collections",
+               arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
+               &collection)
+
+       if err != nil {
+               t.Fatalf("Error creating collection %v", err)
+       }
+
+       return collection["uuid"].(string), locators
+}
+
+/*
+  Create a collection with multiple streams and blocks; backdate its blocks but do not delete the collection.
+  Create another collection with multiple streams and blocks; backdate its blocks and delete the collection.
+  After a datamanager run, expect the first collection's blocks to remain, but none of the second collection's.
+*/
+func TestPutAndGetCollectionsWithMultipleStreamsAndBlocks(t *testing.T) {
+       defer TearDownDataManagerTest(t)
+       SetupDataManagerTest(t)
+
+       // create collection whose blocks will be backdated
+       collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", 100, 10)
+       if collectionWithOldBlocks == "" {
+               t.Fatalf("Failed to create collection with 1000 blocks")
+       }
+       if len(oldBlocks) != 1000 {
+               t.Fatalf("Not all blocks are created: expected %v, found %v", 1000, len(oldBlocks))
+       }
+
+       // create another collection; its blocks will be backdated and the collection itself will be deleted
+       toBeDeletedCollection, toBeDeletedCollectionBlocks := createMultiStreamBlockCollection(t, "new block", 2, 5)
+       if toBeDeletedCollection == "" {
+               t.Fatalf("Failed to create collection with 10 blocks")
+       }
+
+       // create a stray block that will be backdated
+       strayOldBlock := putBlock(t, "this stray block is old")
+
+       // create another block that will not be backdated
+       strayNewBlock := putBlock(t, "this stray block is new")
+
+       expected := []string{}
+       expected = append(expected, oldBlocks...)
+       expected = append(expected, toBeDeletedCollectionBlocks...)
+       expected = append(expected, strayOldBlock)
+       expected = append(expected, strayNewBlock)
+       verifyBlocks(t, nil, expected, 2)
+
+       // Backdate old blocks; but the collection still references these blocks
+       backdateBlocks(t, oldBlocks)
+
+       // Backdate the second collection's blocks and delete the collection, leaving those blocks unreferenced
+       backdateBlocks(t, toBeDeletedCollectionBlocks)
+       deleteCollection(t, toBeDeletedCollection)
+
+       // also backdate the stray old block
+       backdateBlocks(t, []string{strayOldBlock})
+
+       // run datamanager
+       dataManagerSingleRun(t)
+
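+       // After this run, the still-referenced old blocks and the new stray
+       // block should remain; the backdated stray block and the deleted
+       // collection's blocks should be gone.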
+       expected = []string{strayNewBlock}
+       expected = append(expected, oldBlocks...)
+
+       notExpected := []string{strayOldBlock}
+       notExpected = append(notExpected, toBeDeletedCollectionBlocks...)
+
+       verifyBlocks(t, notExpected, expected, 2)
+}