X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/8adab284315900b0d7d2edd1c28957f0e510019e..c512f060057f497030df8266f680d55084c0e860:/services/datamanager/datamanager_test.go

diff --git a/services/datamanager/datamanager_test.go b/services/datamanager/datamanager_test.go
index 68534b2334..28faf989ce 100644
--- a/services/datamanager/datamanager_test.go
+++ b/services/datamanager/datamanager_test.go
@@ -6,6 +6,8 @@ import (
 	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
 	"git.curoverse.com/arvados.git/sdk/go/arvadostest"
 	"git.curoverse.com/arvados.git/sdk/go/keepclient"
+	"git.curoverse.com/arvados.git/services/datamanager/collection"
+	"git.curoverse.com/arvados.git/services/datamanager/summary"
 	"io/ioutil"
 	"net/http"
 	"os"
@@ -16,11 +18,6 @@ import (
 	"time"
 )
 
-const (
-	ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
-	AdminToken = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
-)
-
 var arv arvadosclient.ArvadosClient
 var keepClient *keepclient.KeepClient
 var keepServers []string
@@ -31,9 +28,14 @@ func SetupDataManagerTest(t *testing.T) {
 	// start api and keep servers
 	arvadostest.ResetEnv()
 	arvadostest.StartAPI()
-	arvadostest.StartKeep()
+	arvadostest.StartKeep(2, false)
 
-	arv = makeArvadosClient()
+	var err error
+	arv, err = arvadosclient.MakeArvadosClient()
+	if err != nil {
+		t.Fatalf("Error making arvados client: %s", err)
+	}
+	arv.ApiToken = arvadostest.DataManagerToken
 
 	// keep client
 	keepClient = &keepclient.KeepClient{
@@ -44,7 +46,7 @@ func SetupDataManagerTest(t *testing.T) {
 	}
 
 	// discover keep services
-	if err := keepClient.DiscoverKeepServers(); err != nil {
+	if err = keepClient.DiscoverKeepServers(); err != nil {
 		t.Fatalf("Error discovering keep services: %s", err)
 	}
 	keepServers = []string{}
@@ -54,8 +56,10 @@ func SetupDataManagerTest(t *testing.T) {
 }
 
 func TearDownDataManagerTest(t *testing.T) {
-	arvadostest.StopKeep()
+	arvadostest.StopKeep(2)
 	arvadostest.StopAPI()
+	summary.WriteDataTo = ""
+	collection.HeapProfileFilename = ""
 }
 
 func putBlock(t *testing.T, data string) string {
@@ -124,7 +128,18 @@ func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
 	return match[1] + "+" + match[2]
 }
 
+func switchToken(t string) func() {
+	orig := arv.ApiToken
+	restore := func() {
+		arv.ApiToken = orig
+	}
+	arv.ApiToken = t
+	return restore
+}
+
 func getCollection(t *testing.T, uuid string) Dict {
+	defer switchToken(arvadostest.AdminToken)()
+
 	getback := make(Dict)
 	err := arv.Get("collections", uuid, nil, &getback)
 	if err != nil {
@@ -138,6 +153,8 @@ func getCollection(t *testing.T, uuid string) Dict {
 }
 
 func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
+	defer switchToken(arvadostest.AdminToken)()
+
 	err := arv.Update("collections", uuid, arvadosclient.Dict{
 		"collection": arvadosclient.Dict{
 			paramName: paramValue,
@@ -152,6 +169,8 @@ func updateCollection(t *testing.T, uuid string, paramName string, paramValue st
 type Dict map[string]interface{}
 
 func deleteCollection(t *testing.T, uuid string) {
+	defer switchToken(arvadostest.AdminToken)()
+
 	getback := make(Dict)
 	err := arv.Delete("collections", uuid, nil, &getback)
 	if err != nil {
@@ -175,7 +194,7 @@ func getBlockIndexesForServer(t *testing.T, i int) []string {
 	path := keepServers[i] + "/index"
 	client := http.Client{}
 	req, err := http.NewRequest("GET", path, nil)
-	req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
 	req.Header.Add("Content-Type", "application/octet-stream")
 	resp, err := client.Do(req)
 	defer resp.Body.Close()
@@ -206,7 +225,7 @@ func getBlockIndexes(t *testing.T) [][]string {
 	return indexes
 }
 
-func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
+func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
 	blocks := getBlockIndexes(t)
 
 	for _, block := range notExpected {
@@ -224,8 +243,8 @@ func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
 				nFound++
 			}
 		}
-		if nFound < 2 {
-			t.Fatalf("Found %d replicas of block %s, expected >= 2", nFound, block)
+		if nFound < minReplication {
+			t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
 		}
 	}
 }
@@ -239,13 +258,10 @@ func valueInArray(value string, list []string) bool {
 	return false
 }
 
-/*
-Test env uses two keep volumes. The volume names can be found by reading the files
-  ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
-
-The keep volumes are of the dir structure:
-  volumeN/subdir/locator
-*/
+// Test env uses two keep volumes. The volume names can be found by reading the files
+// ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
+//
+// The keep volumes are of the dir structure: volumeN/subdir/locator
 func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
 	// First get rid of any size hints in the locators
 	var trimmedBlockLocators []string
@@ -297,7 +313,7 @@ func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
 func getStatus(t *testing.T, path string) interface{} {
 	client := http.Client{}
 	req, err := http.NewRequest("GET", path, nil)
-	req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
 	req.Header.Add("Content-Type", "application/octet-stream")
 	resp, err := client.Do(req)
 	if err != nil {
@@ -316,10 +332,10 @@ func waitUntilQueuesFinishWork(t *testing.T) {
 	for _, ks := range keepServers {
 		for done := false; !done; {
 			time.Sleep(100 * time.Millisecond)
-			s := getStatus(t, ks + "/status.json")
+			s := getStatus(t, ks+"/status.json")
 			for _, qName := range []string{"PullQueue", "TrashQueue"} {
 				qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
-				if qStatus["Queued"].(float64) + qStatus["InProgress"].(float64) == 0 {
+				if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) == 0 {
 					done = true
 				}
 			}
@@ -327,11 +343,9 @@ func waitUntilQueuesFinishWork(t *testing.T) {
 	}
 }
 
-/*
-Create some blocks and backdate some of them.
-Also create some collections and delete some of them.
-Verify block indexes.
-*/
+// Create some blocks and backdate some of them.
+// Also create some collections and delete some of them.
+// Verify block indexes.
 func TestPutAndGetBlocks(t *testing.T) {
 	defer TearDownDataManagerTest(t)
 	SetupDataManagerTest(t)
@@ -365,30 +379,34 @@ func TestPutAndGetBlocks(t *testing.T) {
 	}
 
 	// Create a collection that would be deleted later on
-	toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
-	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUuid)
+	toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
+	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)
 
 	// Create another collection that has the same data as the one of the old blocks
-	oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
-	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUuid)
+	oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
+	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
 	if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
 		t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
 	}
 
 	// Create another collection whose replication level will be changed
-	replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
-	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUuid)
+	replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
+	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)
 
 	// Create two collections with same data; one will be deleted later on
 	dataForTwoCollections := "one of these collections will be deleted"
-	oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
-	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUuid)
-	secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
-	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUuid)
+	oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
+	secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
 	if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
 		t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
 	}
 
+	// create collection with empty manifest text
+	emptyBlockLocator := putBlock(t, "")
+	emptyCollection := createCollection(t, "")
+
 	// Verify blocks before doing any backdating / deleting.
 	var expected []string
 	expected = append(expected, oldUnusedBlockLocators...)
@@ -397,19 +415,22 @@ func TestPutAndGetBlocks(t *testing.T) {
 	expected = append(expected, replicationCollectionLocator)
 	expected = append(expected, oneOfTwoWithSameDataLocator)
 	expected = append(expected, secondOfTwoWithSameDataLocator)
+	expected = append(expected, emptyBlockLocator)
 
-	verifyBlocks(t, nil, expected)
+	verifyBlocks(t, nil, expected, 2)
 
 	// Run datamanager in singlerun mode
 	dataManagerSingleRun(t)
 	waitUntilQueuesFinishWork(t)
 
-	verifyBlocks(t, nil, expected)
+	verifyBlocks(t, nil, expected, 2)
 
 	// Backdate the to-be old blocks and delete the collections
 	backdateBlocks(t, oldUnusedBlockLocators)
-	deleteCollection(t, toBeDeletedCollectionUuid)
-	deleteCollection(t, secondOfTwoWithSameDataUuid)
+	deleteCollection(t, toBeDeletedCollectionUUID)
+	deleteCollection(t, secondOfTwoWithSameDataUUID)
+	backdateBlocks(t, []string{emptyBlockLocator})
+	deleteCollection(t, emptyCollection)
 
 	// Run data manager again
 	dataManagerSingleRun(t)
@@ -420,44 +441,36 @@ func TestPutAndGetBlocks(t *testing.T) {
 	expected = append(expected, oldUsedBlockLocator)
 	expected = append(expected, newBlockLocators...)
 	expected = append(expected, toBeDeletedCollectionLocator)
-	expected = append(expected, replicationCollectionLocator)
 	expected = append(expected, oneOfTwoWithSameDataLocator)
 	expected = append(expected, secondOfTwoWithSameDataLocator)
+	expected = append(expected, emptyBlockLocator) // even when unreferenced, this remains
 
-	verifyBlocks(t, oldUnusedBlockLocators, expected)
+	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 
-	// Reduce replication on replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.
-
-	// Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
-	for i := 0; i < len(keepServers); i++ {
-		indexes := getBlockIndexesForServer(t, i)
-		if !valueInArray(replicationCollectionLocator, indexes) {
-			t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-		}
-	}
+	// Reduce desired replication on replicationCollectionUUID
+	// collection, and verify that Data Manager does not reduce
+	// actual replication any further than that. (It might not
+	// reduce actual replication at all; that's OK for this test.)
 
-	// Now reduce replication level on this collection and verify that it still appears in both volumes
-	updateCollection(t, replicationCollectionUuid, "replication_desired", "1")
-	collection := getCollection(t, replicationCollectionUuid)
+	// Reduce desired replication level.
+	updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
+	collection := getCollection(t, replicationCollectionUUID)
 	if collection["replication_desired"].(interface{}) != float64(1) {
 		t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
 	}
 
+	// Verify data is currently overreplicated.
+	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)
+
 	// Run data manager again
-	time.Sleep(100 * time.Millisecond)
 	dataManagerSingleRun(t)
 	waitUntilQueuesFinishWork(t)
 
-	for i := 0; i < len(keepServers); i++ {
-		indexes := getBlockIndexesForServer(t, i)
-		if !valueInArray(replicationCollectionLocator, indexes) {
-			t.Fatalf("Not found block in index %s", replicationCollectionLocator)
-		}
-	}
-	// Done testing reduce replication on collection
+	// Verify data is not underreplicated.
+	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)
 
-	// Verify blocks one more time
-	verifyBlocks(t, oldUnusedBlockLocators, expected)
+	// Verify *other* collections' data is not underreplicated.
+	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
 }
 
 func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
@@ -469,7 +482,6 @@ func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Got an error during datamanager singlerun: %v", err)
 		}
-		time.Sleep(100 * time.Millisecond)
 	}
 }
 
@@ -514,10 +526,133 @@ func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
 	defer TearDownDataManagerTest(t)
 	SetupDataManagerTest(t)
 
-	arv.ApiToken = ActiveUserToken
+	arv.ApiToken = arvadostest.ActiveToken
 
 	err := singlerun(arv)
 	if err == nil {
 		t.Fatalf("Expected error during singlerun as non-admin user")
 	}
 }
+
+func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "/badwritetofile", "", true, true)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "", "/badheapprofilefile", true, true)
+}
+
+// Create some blocks and backdate some of them.
+// Run datamanager while producing an error condition.
+// Verify that the blocks are hence not deleted.
+func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
+	defer TearDownDataManagerTest(t)
+	SetupDataManagerTest(t)
+
+	// Put some blocks and backdate them.
+	var oldUnusedBlockLocators []string
+	oldUnusedBlockData := "this block will have older mtime"
+	for i := 0; i < 5; i++ {
+		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
+	}
+	backdateBlocks(t, oldUnusedBlockLocators)
+
+	// Run data manager
+	summary.WriteDataTo = writeDataTo
+	collection.HeapProfileFilename = heapProfileFile
+
+	err := singlerun(arv)
+	if !expectError {
+		if err != nil {
+			t.Fatalf("Got an error during datamanager singlerun: %v", err)
+		}
+	} else {
+		if err == nil {
+			t.Fatalf("Expected error during datamanager singlerun")
+		}
+	}
+	waitUntilQueuesFinishWork(t)
+
+	// Get block indexes and verify that all backdated blocks are not/deleted as expected
+	if expectOldBlocks {
+		verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
+	} else {
+		verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
+	}
+}
+
+// Create a collection with multiple streams and blocks
+func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
+	defer switchToken(arvadostest.AdminToken)()
+
+	manifest := ""
+	locators := make(map[string]bool)
+	for s := 0; s < numStreams; s++ {
+		manifest += fmt.Sprintf("./stream%d ", s)
+		for b := 0; b < numBlocks; b++ {
+			locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
+			if err != nil {
+				t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
+			}
+			locators[strings.Split(locator, "+A")[0]] = true
+			manifest += locator + " "
+		}
+		manifest += "0:1:dummyfile.txt\n"
+	}
+
+	collection := make(Dict)
+	err := arv.Create("collections",
+		arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
+		&collection)
+
+	if err != nil {
+		t.Fatalf("Error creating collection %v", err)
+	}
+
+	var locs []string
+	for k, _ := range locators {
+		locs = append(locs, k)
+	}
+
+	return collection["uuid"].(string), locs
+}
+
+// Create collection with multiple streams and blocks; backdate the blocks and but do not delete the collection.
+// Also, create stray block and backdate it.
+// After datamanager run: expect blocks from the collection, but not the stray block.
+func TestManifestWithMultipleStreamsAndBlocks(t *testing.T) {
+	defer TearDownDataManagerTest(t)
+	SetupDataManagerTest(t)
+
+	// create collection whose blocks will be backdated
+	collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", 100, 10)
+	if collectionWithOldBlocks == "" {
+		t.Fatalf("Failed to create collection with 1000 blocks")
+	}
+	if len(oldBlocks) != 1000 {
+		t.Fatalf("Not all blocks are created: expected %v, found %v", 1000, len(oldBlocks))
+	}
+
+	// create a stray block that will be backdated
+	strayOldBlock := putBlock(t, "this stray block is old")
+
+	expected := []string{strayOldBlock}
+	expected = append(expected, oldBlocks...)
+	verifyBlocks(t, nil, expected, 2)
+
+	// Backdate old blocks; but the collection still references these blocks
+	backdateBlocks(t, oldBlocks)
+
+	// also backdate the stray old block
+	backdateBlocks(t, []string{strayOldBlock})
+
+	// run datamanager
+	dataManagerSingleRun(t)
+
+	// verify that strayOldBlock is not to be found, but the collections blocks are still there
+	verifyBlocks(t, []string{strayOldBlock}, oldBlocks, 2)
+}
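
The switchToken helper added in this diff is invoked as "defer switchToken(arvadostest.AdminToken)()": the helper call runs immediately (swapping the package-level client's token and returning a restore closure), and only the returned closure is deferred. The following is a minimal standalone sketch of that idiom, not the actual test code; the client struct and the token strings are placeholders standing in for the package-level arvadosclient.ArvadosClient and the arvadostest tokens.

package main

import "fmt"

// client stands in for the package-level arvadosclient.ArvadosClient
// used by the tests; only the ApiToken field matters for this sketch.
type client struct {
	ApiToken string
}

var arv = client{ApiToken: "active-user-token"} // placeholder token

// switchToken mirrors the helper added in the diff: it swaps the
// global token right away and returns a func that restores the
// original token when called.
func switchToken(token string) func() {
	orig := arv.ApiToken
	arv.ApiToken = token
	return func() { arv.ApiToken = orig }
}

// getCollectionAsAdmin shows the call pattern from the diff:
// switchToken(...) executes now; only the restore closure is deferred.
func getCollectionAsAdmin() {
	defer switchToken("admin-token")()
	fmt.Println("during call:", arv.ApiToken) // during call: admin-token
}

func main() {
	getCollectionAsAdmin()
	fmt.Println("after call:", arv.ApiToken) // after call: active-user-token
}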