6 "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
7 "git.curoverse.com/arvados.git/sdk/go/arvadostest"
8 "git.curoverse.com/arvados.git/sdk/go/keepclient"
9 "git.curoverse.com/arvados.git/services/datamanager/collection"
10 "git.curoverse.com/arvados.git/services/datamanager/summary"
22 var arv arvadosclient.ArvadosClient
23 var keepClient *keepclient.KeepClient
24 var keepServers []string
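
// These tests exercise the data manager end to end against a local test
// cluster. They assume the arvadostest fixtures are available (StartAPI,
// StartKeep) and that the arv-put CLI tool is on the PATH.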
func SetupDataManagerTest(t *testing.T) {
	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")

	// start api and keep servers
	arvadostest.ResetEnv()
	arvadostest.StartAPI()
	arvadostest.StartKeep(2, false)

	var err error
	arv, err = arvadosclient.MakeArvadosClient()
	if err != nil {
		t.Fatalf("Error making arvados client: %s", err)
	}
	arv.ApiToken = arvadostest.DataManagerToken

	// make a keep client
	keepClient = &keepclient.KeepClient{
		Arvados:       &arv,
		Want_replicas: 2,
		Client:        &http.Client{},
	}

	// discover keep services
	if err = keepClient.DiscoverKeepServers(); err != nil {
		t.Fatalf("Error discovering keep services: %s", err)
	}
	keepServers = []string{}
	for _, host := range keepClient.LocalRoots() {
		keepServers = append(keepServers, host)
	}
}
func TearDownDataManagerTest(t *testing.T) {
	arvadostest.StopKeep(2)
	arvadostest.StopAPI()
	summary.WriteDataTo = ""
	collection.HeapProfileFilename = ""
}
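
// A Keep locator has the form "<md5-hex>+<size>[+<hints>...]"; for example,
// putting "foo" yields "acbd18db4cc2f85cedef654fccc4a4d8+3+A<signature>...".
// putBlock strips any trailing hints and returns just "<md5-hex>+<size>".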
func putBlock(t *testing.T, data string) string {
	locator, _, err := keepClient.PutB([]byte(data))
	if err != nil {
		t.Fatalf("Error putting test data for %s %s %v", data, locator, err)
	}
	if locator == "" {
		t.Fatalf("No locator found after putting test data")
	}

	splits := strings.Split(locator, "+")
	return splits[0] + "+" + splits[1]
}
func getBlock(t *testing.T, locator string, data string) {
	reader, blocklen, _, err := keepClient.Get(locator)
	if err != nil {
		t.Fatalf("Error getting test data in setup for %s %s %v", data, locator, err)
	}
	if reader == nil {
		t.Fatalf("No reader found after putting test data")
	}
	if blocklen != int64(len(data)) {
		t.Fatalf("blocklen %d did not match data len %d", blocklen, len(data))
	}

	all, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatalf("Error reading test data for %s %s %v", data, locator, err)
	}
	if string(all) != data {
		t.Fatalf("Data read %s did not match expected data %s", string(all), data)
	}
}
// Create a collection using arv-put
func createCollection(t *testing.T, data string) string {
	tempfile, err := ioutil.TempFile(os.TempDir(), "temp-test-file")
	if err != nil {
		t.Fatalf("Error creating tempfile %v", err)
	}
	defer os.Remove(tempfile.Name())

	_, err = tempfile.Write([]byte(data))
	if err != nil {
		t.Fatalf("Error writing to tempfile %v", err)
	}

	// create collection
	output, err := exec.Command("arv-put", "--use-filename", "test.txt", tempfile.Name()).Output()
	if err != nil {
		t.Fatalf("Error running arv-put %s", err)
	}

	uuid := string(output[0:27]) // trim the trailing newline
	return uuid
}
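
// A manifest line has the form
//   <stream name> <locator>... <file segment>...
// e.g. ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file.txt", so the second
// space-separated token of a single-stream manifest is its first block locator.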
// Get collection locator
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)

func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
	manifest := getCollection(t, uuid)["manifest_text"].(string)

	locator := strings.Split(manifest, " ")[1]
	match := locatorMatcher.FindStringSubmatch(locator)
	if match == nil {
		t.Fatalf("No locator found in collection manifest %s", manifest)
	}

	return match[1] + "+" + match[2]
}
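
// switchToken swaps arv.ApiToken for the given token and returns a function
// that restores the original; the intended calling pattern is:
//   defer switchToken(arvadostest.AdminToken)()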
func switchToken(t string) func() {
	orig := arv.ApiToken
	restore := func() {
		arv.ApiToken = orig
	}
	arv.ApiToken = t
	return restore
}
func getCollection(t *testing.T, uuid string) Dict {
	defer switchToken(arvadostest.AdminToken)()

	getback := make(Dict)
	err := arv.Get("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error getting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Get collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}

	return getback
}
func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
	defer switchToken(arvadostest.AdminToken)()

	err := arv.Update("collections", uuid, arvadosclient.Dict{
		"collection": arvadosclient.Dict{
			paramName: paramValue,
		},
	}, &arvadosclient.Dict{})
	if err != nil {
		t.Fatalf("Error updating collection %s", err)
	}
}
type Dict map[string]interface{}
func deleteCollection(t *testing.T, uuid string) {
	defer switchToken(arvadostest.AdminToken)()

	getback := make(Dict)
	err := arv.Delete("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error deleting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Delete collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
}
func dataManagerSingleRun(t *testing.T) {
	err := singlerun(arv)
	if err != nil {
		t.Fatalf("Error during singlerun %s", err)
	}
}
func getBlockIndexesForServer(t *testing.T, i int) []string {
	var indexes []string

	path := keepServers[i] + "/index"
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	if err != nil {
		t.Fatalf("Error building request for %s %s", path, err)
	}
	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Error reading response from %s %s", path, err)
	}

	lines := strings.Split(string(body), "\n")
	for _, line := range lines {
		indexes = append(indexes, strings.Split(line, " ")...)
	}

	return indexes
}
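
// Each line of a keepstore /index response is "<locator> <mtime>". The
// splitting above keeps both tokens, which is harmless here because only
// locators are ever looked up in the returned slice.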
func getBlockIndexes(t *testing.T) [][]string {
	var indexes [][]string

	for i := 0; i < len(keepServers); i++ {
		indexes = append(indexes, getBlockIndexesForServer(t, i))
	}
	return indexes
}
func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
	blocks := getBlockIndexes(t)

	for _, block := range notExpected {
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				t.Fatalf("Found unexpected block %s", block)
			}
		}
	}

	for _, block := range expected {
		nFound := 0
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				nFound++
			}
		}
		if nFound < minReplication {
			t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
		}
	}
}
func valueInArray(value string, list []string) bool {
	for _, v := range list {
		if value == v {
			return true
		}
	}
	return false
}
// Test env uses two keep volumes. The volume names can be found by reading the files
// ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
//
// The keep volumes use the dir structure: volumeN/subdir/locator
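// For example, a block may be stored at a path like (hypothetical hash):
//   <volume0>/3a4/3a41d091bc24891a2c51a24e4c4b0d3f
// where the subdir is typically a short prefix of the locator hash.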
func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
	// First get rid of any size hints in the locators
	var trimmedBlockLocators []string
	for _, block := range oldUnusedBlockLocators {
		trimmedBlockLocators = append(trimmedBlockLocators, strings.Split(block, "+")[0])
	}

	// Get the working dir so that we can read keep{n}.volume files
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Error getting working dir %s", err)
	}

	// Now cycle through the two keep volumes
	oldTime := time.Now().AddDate(0, -2, 0)
	for i := 0; i < 2; i++ {
		filename := fmt.Sprintf("%s/../../tmp/keep%d.volume", wd, i)
		volumeDir, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("Error reading keep volume file %s %s", filename, err)
		}

		// Read the keep volume dir structure
		volumeContents, err := ioutil.ReadDir(string(volumeDir))
		if err != nil {
			t.Fatalf("Error reading keep dir %s %s", string(volumeDir), err)
		}

		// Read each subdir of the keep volume dir
		for _, subdir := range volumeContents {
			subdirName := fmt.Sprintf("%s/%s", volumeDir, subdir.Name())
			subdirContents, err := ioutil.ReadDir(subdirName)
			if err != nil {
				t.Fatalf("Error reading keep dir %s %s", subdirName, err)
			}

			// Now we have reached the files; the file names are the block locators
			for _, fileInfo := range subdirContents {
				blockName := fileInfo.Name()
				myname := fmt.Sprintf("%s/%s", subdirName, blockName)
				if valueInArray(blockName, trimmedBlockLocators) {
					err = os.Chtimes(myname, oldTime, oldTime)
					if err != nil {
						t.Fatalf("Error backdating %s %s", myname, err)
					}
				}
			}
		}
	}
}
func getStatus(t *testing.T, path string) interface{} {
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	if err != nil {
		t.Fatalf("Error building request for %s %s", path, err)
	}
	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	var s interface{}
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		t.Fatalf("Error decoding response from %s %s", path, err)
	}
	return s
}
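
// A decoded status.json looks roughly like:
//   {"PullQueue": {"Queued": 0, "InProgress": 0}, "TrashQueue": {...}, ...}
// encoding/json decodes the counts as float64, hence the type assertions below.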
// Wait until PullQueue and TrashQueue are empty on all keepServers.
func waitUntilQueuesFinishWork(t *testing.T) {
	for _, ks := range keepServers {
		for done := false; !done; {
			time.Sleep(100 * time.Millisecond)
			s := getStatus(t, ks+"/status.json")

			done = true
			for _, qName := range []string{"PullQueue", "TrashQueue"} {
				qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
				if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) > 0 {
					done = false
				}
			}
		}
	}
}
// Create some blocks and backdate some of them.
// Also create some collections and delete some of them.
// Verify block indexes.
func TestPutAndGetBlocks(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks which will be backdated later on.
	// They will be old and unreferenced and hence should be deleted when datamanager runs.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, oldUnusedBlockLocators[i], fmt.Sprintf("%s%d", oldUnusedBlockData, i))
	}

	// This block will be referenced by a collection created below and hence should not be deleted when datamanager runs.
	oldUsedBlockData := "this collection block will have older mtime"
	oldUsedBlockLocator := putBlock(t, oldUsedBlockData)
	getBlock(t, oldUsedBlockLocator, oldUsedBlockData)
	// Put some more blocks which will not be backdated; hence they are still new, but not in any collection.
	// Even though unreferenced, these should not be deleted when datamanager runs.
	var newBlockLocators []string
	newBlockData := "this block is newer"
	for i := 0; i < 5; i++ {
		newBlockLocators = append(newBlockLocators, putBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
	}
	// Create a collection that will be deleted later on
	toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)

	// Create another collection that has the same data as one of the old blocks
	oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
	if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
		t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
	}

	// Create another collection whose replication level will be changed
	replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)

	// Create two collections with the same data; one will be deleted later on
	dataForTwoCollections := "one of these collections will be deleted"
	oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
	secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
	if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
		t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
	}
	// create collection with empty manifest text
	emptyBlockLocator := putBlock(t, "")
	emptyCollection := createCollection(t, "")

	// Verify blocks before doing any backdating / deleting.
	var expected []string
	expected = append(expected, oldUnusedBlockLocators...)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, replicationCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)
	expected = append(expected, emptyBlockLocator)

	verifyBlocks(t, nil, expected, 2)
	// Run datamanager in singlerun mode
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	verifyBlocks(t, nil, expected, 2)

	// Backdate the to-be-old blocks and delete the collections
	backdateBlocks(t, oldUnusedBlockLocators)
	deleteCollection(t, toBeDeletedCollectionUUID)
	deleteCollection(t, secondOfTwoWithSameDataUUID)
	backdateBlocks(t, []string{emptyBlockLocator})
	deleteCollection(t, emptyCollection)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)
	// Get block indexes and verify that the backdated unreferenced blocks are gone, while everything else remains.
	expected = expected[:0]
	expected = append(expected, oldUsedBlockLocator)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)
	expected = append(expected, emptyBlockLocator) // even when unreferenced, this remains

	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
	// Reduce desired replication on the replicationCollectionUUID
	// collection, and verify that Data Manager does not reduce
	// actual replication any further than that. (It might not
	// reduce actual replication at all; that's OK for this test.)

	// Reduce desired replication level.
	updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
	collection := getCollection(t, replicationCollectionUUID)
	if collection["replication_desired"] != float64(1) {
		t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
	}

	// Verify data is currently overreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Verify data is not underreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)

	// Verify *other* collections' data is not underreplicated.
	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
}
func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		err := singlerun(arv)
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
	}
}
func TestGetStatusRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		for j := 0; j < 2; j++ {
			s := getStatus(t, keepServers[j]+"/status.json")

			pullQueueStatus := s.(map[string]interface{})["PullQueue"]
			trashQueueStatus := s.(map[string]interface{})["TrashQueue"]

			if pullQueueStatus.(map[string]interface{})["Queued"] == nil ||
				pullQueueStatus.(map[string]interface{})["InProgress"] == nil ||
				trashQueueStatus.(map[string]interface{})["Queued"] == nil ||
				trashQueueStatus.(map[string]interface{})["InProgress"] == nil {
				t.Fatalf("PullQueue and TrashQueue status not found")
			}

			time.Sleep(100 * time.Millisecond)
		}
	}
}
func TestRunDatamanagerWithBogusServer(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiServer = "bogus-server"

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun with bogus server")
	}
}
func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiToken = arvadostest.ActiveToken

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun as non-admin user")
	}
}
func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
	testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
}
func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
	badpath, err := arvadostest.CreateBadPath()
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer func() {
		err = arvadostest.DestroyBadPath(badpath)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}()
	testOldBlocksNotDeletedOnDataManagerError(t, path.Join(badpath, "writetofile"), "", true, true)
}
func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
	badpath, err := arvadostest.CreateBadPath()
	if err != nil {
		t.Fatalf(err.Error())
	}
	defer func() {
		err = arvadostest.DestroyBadPath(badpath)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}()
	testOldBlocksNotDeletedOnDataManagerError(t, "", path.Join(badpath, "heapprofilefile"), true, true)
}
// Create some blocks and backdate them.
// Run datamanager while producing an error condition.
// Verify that the old blocks are or are not deleted, as expected.
func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks and backdate them.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	backdateBlocks(t, oldUnusedBlockLocators)

	// Induce the error condition, if any, by pointing these outputs at bad paths.
	summary.WriteDataTo = writeDataTo
	collection.HeapProfileFilename = heapProfileFile

	err := singlerun(arv)
	if !expectError {
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
	} else if err == nil {
		t.Fatalf("Expected error during datamanager singlerun")
	}

	waitUntilQueuesFinishWork(t)

	// Get block indexes and verify that the backdated blocks were deleted or not, as expected
	if expectOldBlocks {
		verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
	} else {
		verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
	}
}
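
// For example, with numStreams=2 and numBlocks=1 the manifest_text built
// below looks like (locators abbreviated):
//   ./stream0 <md5+size+hints> 0:1:dummyfile.txt
//   ./stream1 <md5+size+hints> 0:1:dummyfile.txt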
// Create a collection with multiple streams and blocks
func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
	defer switchToken(arvadostest.AdminToken)()

	manifest := ""
	locators := make(map[string]bool)
	for s := 0; s < numStreams; s++ {
		manifest += fmt.Sprintf("./stream%d ", s)
		for b := 0; b < numBlocks; b++ {
			locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
			if err != nil {
				t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
			}
			locators[strings.Split(locator, "+A")[0]] = true
			manifest += locator + " "
		}
		manifest += "0:1:dummyfile.txt\n"
	}

	collection := make(Dict)
	err := arv.Create("collections",
		arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
		&collection)
	if err != nil {
		t.Fatalf("Error creating collection %v", err)
	}

	var locs []string
	for k := range locators {
		locs = append(locs, k)
	}

	return collection["uuid"].(string), locs
}
// Create a collection with multiple streams and blocks; backdate the blocks but do not delete the collection.
// Also, create a stray block and backdate it.
// After datamanager runs: expect blocks from the collection, but not the stray block.
func TestManifestWithMultipleStreamsAndBlocks(t *testing.T) {
	testManifestWithMultipleStreamsAndBlocks(t, 100, 10, "", false)
}

// Same test as TestManifestWithMultipleStreamsAndBlocks, with an additional
// keepstore of a service type other than "disk". Only "disk" type services
// are indexed by datamanager, so the outcome should be the same.
func TestManifestWithMultipleStreamsAndBlocks_WithOneUnsupportedKeepServer(t *testing.T) {
	testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "testblobstore", false)
}

// Test datamanager with dry-run. Expect no blocks to be deleted.
func TestManifestWithMultipleStreamsAndBlocks_DryRun(t *testing.T) {
	testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "", true)
}
func testManifestWithMultipleStreamsAndBlocks(t *testing.T, numStreams, numBlocks int, createExtraKeepServerWithType string, isDryRun bool) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// create collection whose blocks will be backdated
	collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", numStreams, numBlocks)
	if collectionWithOldBlocks == "" {
		t.Fatalf("Failed to create collection with %d blocks", numStreams*numBlocks)
	}
	if len(oldBlocks) != numStreams*numBlocks {
		t.Fatalf("Not all blocks were created: expected %v, found %v", numStreams*numBlocks, len(oldBlocks))
	}

	// create a stray block that will be backdated
	strayOldBlock := putBlock(t, "this stray block is old")

	expected := []string{strayOldBlock}
	expected = append(expected, oldBlocks...)
	verifyBlocks(t, nil, expected, 2)

	// Backdate old blocks; the collection still references these blocks
	backdateBlocks(t, oldBlocks)

	// also backdate the stray old block
	backdateBlocks(t, []string{strayOldBlock})

	// If requested, create an extra keepserver with the given type.
	// This should be ignored during indexing and hence not change the datamanager outcome.
	var extraKeepServerUUID string
	if createExtraKeepServerWithType != "" {
		extraKeepServerUUID = addExtraKeepServer(t, createExtraKeepServerWithType)
		defer deleteExtraKeepServer(extraKeepServerUUID)
	}

	// run datamanager, propagating the dry-run setting
	dryRun = isDryRun
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	if isDryRun {
		// verify that all blocks, including strayOldBlock, are still to be found
		verifyBlocks(t, nil, expected, 2)
	} else {
		// verify that strayOldBlock is gone, but the collection's blocks are still there
		verifyBlocks(t, []string{strayOldBlock}, oldBlocks, 2)
	}
}
// Add one more keepstore with the given service type
func addExtraKeepServer(t *testing.T, serviceType string) string {
	defer switchToken(arvadostest.AdminToken)()

	extraKeepService := make(arvadosclient.Dict)
	err := arv.Create("keep_services",
		arvadosclient.Dict{"keep_service": arvadosclient.Dict{
			"service_host":     "localhost",
			"service_port":     "21321",
			"service_ssl_flag": false,
			"service_type":     serviceType}},
		&extraKeepService)
	if err != nil {
		t.Fatalf("Error creating extra keep service %v", err)
	}

	return extraKeepService["uuid"].(string)
}
func deleteExtraKeepServer(uuid string) {
	defer switchToken(arvadostest.AdminToken)()
	arv.Delete("keep_services", uuid, nil, nil)
}