X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c4ae463b42ee22180982d78289e6898224e508e4..c512f060057f497030df8266f680d55084c0e860:/services/datamanager/datamanager_test.go

diff --git a/services/datamanager/datamanager_test.go b/services/datamanager/datamanager_test.go
index 374b9012c4..28faf989ce 100644
--- a/services/datamanager/datamanager_test.go
+++ b/services/datamanager/datamanager_test.go
@@ -6,6 +6,8 @@ import (
 	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
 	"git.curoverse.com/arvados.git/sdk/go/arvadostest"
 	"git.curoverse.com/arvados.git/sdk/go/keepclient"
+	"git.curoverse.com/arvados.git/services/datamanager/collection"
+	"git.curoverse.com/arvados.git/services/datamanager/summary"
 	"io/ioutil"
 	"net/http"
 	"os"
@@ -28,7 +30,11 @@ func SetupDataManagerTest(t *testing.T) {
 	arvadostest.StartAPI()
 	arvadostest.StartKeep(2, false)
 
-	arv = makeArvadosClient()
+	var err error
+	arv, err = arvadosclient.MakeArvadosClient()
+	if err != nil {
+		t.Fatalf("Error making arvados client: %s", err)
+	}
 	arv.ApiToken = arvadostest.DataManagerToken
 
 	// keep client
@@ -40,7 +46,7 @@ func SetupDataManagerTest(t *testing.T) {
 	}
 
 	// discover keep services
-	if err := keepClient.DiscoverKeepServers(); err != nil {
+	if err = keepClient.DiscoverKeepServers(); err != nil {
 		t.Fatalf("Error discovering keep services: %s", err)
 	}
 	keepServers = []string{}
@@ -52,6 +58,8 @@ func SetupDataManagerTest(t *testing.T) {
 func TearDownDataManagerTest(t *testing.T) {
 	arvadostest.StopKeep(2)
 	arvadostest.StopAPI()
+	summary.WriteDataTo = ""
+	collection.HeapProfileFilename = ""
 }
 
 func putBlock(t *testing.T, data string) string {
@@ -250,13 +258,10 @@ func valueInArray(value string, list []string) bool {
 	return false
 }
 
-/*
-Test env uses two keep volumes. The volume names can be found by reading the files
-  ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
-
-The keep volumes are of the dir structure:
-  volumeN/subdir/locator
-*/
+// Test env uses two keep volumes. The volume names can be found by reading the files
+// ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
+//
+// The keep volumes are of the dir structure: volumeN/subdir/locator
 func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
 	// First get rid of any size hints in the locators
 	var trimmedBlockLocators []string
@@ -338,11 +343,9 @@ func waitUntilQueuesFinishWork(t *testing.T) {
 	}
 }
 
-/*
-Create some blocks and backdate some of them.
-Also create some collections and delete some of them.
-Verify block indexes.
-*/
+// Create some blocks and backdate some of them.
+// Also create some collections and delete some of them.
+// Verify block indexes.
 func TestPutAndGetBlocks(t *testing.T) {
 	defer TearDownDataManagerTest(t)
 	SetupDataManagerTest(t)
@@ -531,170 +534,125 @@ func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
 	}
 }
 
-/*
-  Create a collection with multiple streams and blocks using arv-put
-  Generated manifest will be for the format:
-  ./stream034036412 ae1426cd6bb371ffd4e8eedf5e9f8288+265+A28b017187d22154d8ae2836d5644312196ddede9@565f7801 0:53:temp-test-file101128526 53:53:temp-test-file525370191 106:53:temp-test-file767521515 159:53:temp-test-file914425264 212:53:temp-test-file989413461
-  ./stream043441762 2b421156d2751447d6fa22fda6742769+265+A4766df9d5a455d76ec3fbd5d1ceea6ab1207967d@565f7801 0:53:temp-test-file016029341 53:53:temp-test-file546920630 106:53:temp-test-file688432627 159:53:temp-test-file823040996 212:53:temp-test-file843401817
-*/
-func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) string {
-	tempdir, err := ioutil.TempDir(os.TempDir(), "temp-test-dir")
-	if err != nil {
-		t.Fatalf("Error creating tempdir %s", err)
-	}
-	defer os.Remove(tempdir)
-
-	for i := 0; i < numStreams; i++ {
-		stream, err := ioutil.TempDir(tempdir, "stream")
-		if err != nil {
-			t.Fatalf("Error creating stream tempdir %s", err)
-		}
-		defer os.Remove(stream)
-
-		for j := 0; j < numBlocks; j++ {
-			tempfile, err := ioutil.TempFile(stream, "temp-test-file")
-			if err != nil {
-				t.Fatalf("Error creating tempfile %s", err)
-			}
-			defer os.Remove(tempfile.Name())
-
-			_, err = tempfile.Write([]byte(fmt.Sprintf("%s%d", data, i)))
-			if err != nil {
-				t.Fatalf("Error writing to tempfile %v", err)
-			}
-		}
-	}
-
-	output, err := exec.Command("arv-put", tempdir).Output()
-	if err != nil {
-		t.Fatalf("Error running arv-put %s", err)
-	}
-
-	uuid := string(output[0:27]) // trim terminating char
-	return uuid
+func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
 }
 
-func geLocatorsFromCollection(t *testing.T, uuid string) []string {
-	manifest := getCollection(t, uuid)["manifest_text"].(string)
-
-	locators := []string{}
-	splits := strings.Split(manifest, " ")
-	for _, locator := range splits {
-		match := locatorMatcher.FindStringSubmatch(locator)
-		if match != nil {
-			locators = append(locators, match[1]+"+"+match[2])
-		}
-	}
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "/badwritetofile", "", true, true)
 }
 
-	return locators
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
+	testOldBlocksNotDeletedOnDataManagerError(t, "", "/badheapprofilefile", true, true)
 }
 
-/*
-  Create collection with multiple streams and blocks; backdate the blocks and delete collection.
-  Create another collection with multiple streams and blocks; backdate it's first block and delete the collection
-  After datamanager run: expect only the undeleted blocks from second collection, and none of the backdated blocks.
-*/
-func TestPutAndGetCollectionsWithMultipleStreamsAndBlocks(t *testing.T) {
+// Create some blocks and backdate some of them.
+// Run datamanager while producing an error condition.
+// Verify that the blocks are hence not deleted.
+func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
 	defer TearDownDataManagerTest(t)
 	SetupDataManagerTest(t)
 
-	// Put some blocks which will be backdated later on
-	collectionWithOldBlocks := createMultiStreamBlockCollection(t, "to be deleted collection with old blocks", 5, 5)
-	oldBlocks := geLocatorsFromCollection(t, collectionWithOldBlocks)
-
-	collectionWithNewerBlocks := createMultiStreamBlockCollection(t, "to be deleted collection with newer and older blocks", 5, 5)
-	newerBlocks := geLocatorsFromCollection(t, collectionWithNewerBlocks)
-
-	expected := []string{}
-	expected = append(expected, oldBlocks...)
-	expected = append(expected, newerBlocks...)
-	verifyBlocks(t, nil, expected, 2)
-
-	// Backdate old blocks and delete the collection
-	backdateBlocks(t, oldBlocks)
-	deleteCollection(t, collectionWithOldBlocks)
+	// Put some blocks and backdate them.
+	var oldUnusedBlockLocators []string
+	oldUnusedBlockData := "this block will have older mtime"
+	for i := 0; i < 5; i++ {
+		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
+	}
+	backdateBlocks(t, oldUnusedBlockLocators)
 
-	// Backdate first block from the newer blocks and delete the collection; the rest are still be reachable
-	backdateBlocks(t, newerBlocks[0:1])
-	deleteCollection(t, collectionWithNewerBlocks)
+	// Run data manager
+	summary.WriteDataTo = writeDataTo
+	collection.HeapProfileFilename = heapProfileFile
 
-	// run datamanager
-	dataManagerSingleRun(t)
-
-	notExpected := []string{}
-	notExpected = append(notExpected, oldBlocks...)
-	notExpected = append(notExpected, newerBlocks[0])
+	err := singlerun(arv)
+	if !expectError {
+		if err != nil {
+			t.Fatalf("Got an error during datamanager singlerun: %v", err)
+		}
+	} else {
+		if err == nil {
+			t.Fatalf("Expected error during datamanager singlerun")
+		}
+	}
+	waitUntilQueuesFinishWork(t)
 
-	verifyBlocks(t, notExpected, newerBlocks[1:], 2)
+	// Get block indexes and verify that all backdated blocks are not/deleted as expected
+	if expectOldBlocks {
+		verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
+	} else {
+		verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
+	}
 }
 
-/*
-  Create a collection with multiple blocks in one stream using arv-put
-  Generated manifest will be for the format:
-  . 83d2e2d0938718a56c9b0c518a4b2930+41+A4b671b8c7525c0af302365b03a44406999e42eec@565f7809 0:41:temp-test-file053866981
-  . cb790454ba6cc9a3ffab377937e06225+41+Ab5460755c3480fb899025b74dc59fadb71402bfc@565f7809 0:41:temp-test-file213181952
-*/
-func createMultiBlockCollection(t *testing.T, data string, numBlocks int) string {
-	tempdir, err := ioutil.TempDir(os.TempDir(), "temp-test-dir")
-	defer os.Remove(tempdir)
-
-	filenames := []string{}
-	for i := 0; i < numBlocks; i++ {
-		tempfile, err := ioutil.TempFile(tempdir, "temp-test-file")
-		defer os.Remove(tempfile.Name())
-
-		_, err = tempfile.Write([]byte(fmt.Sprintf("%s%d", data, i)))
-		if err != nil {
-			t.Fatalf("Error writing to tempfile %v", err)
-		}
+// Create a collection with multiple streams and blocks
+func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
+	defer switchToken(arvadostest.AdminToken)()
 
-		filenames = append(filenames, tempfile.Name())
+	manifest := ""
+	locators := make(map[string]bool)
+	for s := 0; s < numStreams; s++ {
+		manifest += fmt.Sprintf("./stream%d ", s)
+		for b := 0; b < numBlocks; b++ {
+			locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
+			if err != nil {
+				t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
+			}
+			locators[strings.Split(locator, "+A")[0]] = true
+			manifest += locator + " "
+		}
+		manifest += "0:1:dummyfile.txt\n"
 	}
 
-	output, err := exec.Command("arv-put", filenames...).Output()
+	collection := make(Dict)
+	err := arv.Create("collections",
+		arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
+		&collection)
+
 	if err != nil {
-		t.Fatalf("Error running arv-put %s", err)
+		t.Fatalf("Error creating collection %v", err)
 	}
 
-	uuid := string(output[0:27]) // trim terminating char
-	return uuid
+	var locs []string
+	for k, _ := range locators {
+		locs = append(locs, k)
+	}
+
+	return collection["uuid"].(string), locs
 }
 
-/*
-  Create collection with multiple blocks with a single stream; backdate the blocks and delete collection.
-  Create another collection with multiple blocks; backdate it's first block and delete the collection
-  After datamanager run: expect only the undeleted blocks from second collection, and none of the backdated blocks.
-*/
-func TestPutAndGetCollectionsWithMultipleBlocks(t *testing.T) {
+// Create collection with multiple streams and blocks; backdate the blocks and but do not delete the collection.
+// Also, create stray block and backdate it.
+// After datamanager run: expect blocks from the collection, but not the stray block.
+func TestManifestWithMultipleStreamsAndBlocks(t *testing.T) {
 	defer TearDownDataManagerTest(t)
 	SetupDataManagerTest(t)
 
-	// Put some blocks which will be backdated later on
-	collectionWithOldBlocks := createMultiBlockCollection(t, "to be deleted collection with old blocks", 5)
-	oldBlocks := geLocatorsFromCollection(t, collectionWithOldBlocks)
+	// create collection whose blocks will be backdated
+	collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", 100, 10)
+	if collectionWithOldBlocks == "" {
+		t.Fatalf("Failed to create collection with 1000 blocks")
+	}
+	if len(oldBlocks) != 1000 {
+		t.Fatalf("Not all blocks are created: expected %v, found %v", 1000, len(oldBlocks))
+	}
 
-	collectionWithNewerBlocks := createMultiBlockCollection(t, "to be deleted collection with newer and older blocks", 5)
-	newerBlocks := geLocatorsFromCollection(t, collectionWithNewerBlocks)
+	// create a stray block that will be backdated
+	strayOldBlock := putBlock(t, "this stray block is old")
 
-	expected := []string{}
+	expected := []string{strayOldBlock}
 	expected = append(expected, oldBlocks...)
-	expected = append(expected, newerBlocks...)
 	verifyBlocks(t, nil, expected, 2)
 
-	// Backdate old blocks and delete the collection
+	// Backdate old blocks; but the collection still references these blocks
 	backdateBlocks(t, oldBlocks)
-	deleteCollection(t, collectionWithOldBlocks)
 
-	// Backdate first block from the newer blocks and delete the collection; the rest are still be reachable
-	backdateBlocks(t, newerBlocks[0:1])
-	deleteCollection(t, collectionWithNewerBlocks)
+	// also backdate the stray old block
+	backdateBlocks(t, []string{strayOldBlock})
 
 	// run datamanager
 	dataManagerSingleRun(t)
 
-	notExpected := []string{}
-	notExpected = append(notExpected, oldBlocks...)
-	notExpected = append(notExpected, newerBlocks[0])
-
-	verifyBlocks(t, notExpected, newerBlocks[1:], 2)
+	// verify that strayOldBlock is not to be found, but the collections blocks are still there
+	verifyBlocks(t, []string{strayOldBlock}, oldBlocks, 2)
 }