6 "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
7 "git.curoverse.com/arvados.git/sdk/go/arvadostest"
8 "git.curoverse.com/arvados.git/sdk/go/keepclient"
9 "git.curoverse.com/arvados.git/services/datamanager/collection"
10 "git.curoverse.com/arvados.git/services/datamanager/summary"
var arv arvadosclient.ArvadosClient
var keepClient *keepclient.KeepClient
var keepServers []string

func SetupDataManagerTest(t *testing.T) {
	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")

	// start api and keep servers
	arvadostest.ResetEnv()
	arvadostest.StartAPI()
	arvadostest.StartKeep(2, false)

	// make arvados client
	var err error
	arv, err = arvadosclient.MakeArvadosClient()
	if err != nil {
		t.Fatalf("Error making arvados client: %s", err)
	}
	arv.ApiToken = arvadostest.DataManagerToken

	// make keep client; want 2 replicas to match the two keep servers
	keepClient = &keepclient.KeepClient{
		Arvados:       &arv,
		Want_replicas: 2,
		Client:        &http.Client{},
	}

	// discover keep services
	if err = keepClient.DiscoverKeepServers(); err != nil {
		t.Fatalf("Error discovering keep services: %s", err)
	}
	keepServers = []string{}
	for _, host := range keepClient.LocalRoots() {
		keepServers = append(keepServers, host)
	}
}

func TearDownDataManagerTest(t *testing.T) {
	arvadostest.StopKeep(2)
	arvadostest.StopAPI()
}

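// putBlock stores data in Keep and returns the locator trimmed to
// "hash+size" (e.g. "acbd18db4cc2f85cedef654fccc4a4d8+3" for "foo"),
// dropping any trailing hints such as permission signatures.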
func putBlock(t *testing.T, data string) string {
	locator, _, err := keepClient.PutB([]byte(data))
	if err != nil {
		t.Fatalf("Error putting test data for %s %s %v", data, locator, err)
	}
	if locator == "" {
		t.Fatalf("No locator found after putting test data")
	}
	// keep only "hash+size", dropping any trailing hints
	splits := strings.Split(locator, "+")
	return splits[0] + "+" + splits[1]
}

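// getBlock reads the block at locator back from Keep and asserts that its
// length and contents match data.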
func getBlock(t *testing.T, locator string, data string) {
	reader, blocklen, _, err := keepClient.Get(locator)
	if err != nil {
		t.Fatalf("Error getting test data in setup for %s %s %v", data, locator, err)
	}
	if reader == nil {
		t.Fatalf("No reader found after putting test data")
	}
	if blocklen != int64(len(data)) {
		t.Fatalf("blocklen %d did not match data len %d", blocklen, len(data))
	}

	all, err := ioutil.ReadAll(reader)
	if err != nil || string(all) != data {
		t.Fatalf("Data read %s did not match expected data %s (read error: %v)", string(all), data, err)
	}
}

// Create a collection using arv-put
func createCollection(t *testing.T, data string) string {
	tempfile, err := ioutil.TempFile(os.TempDir(), "temp-test-file")
	if err != nil {
		t.Fatalf("Error creating tempfile %v", err)
	}
	defer os.Remove(tempfile.Name())
	_, err = tempfile.Write([]byte(data))
	if err != nil {
		t.Fatalf("Error writing to tempfile %v", err)
	}

	output, err := exec.Command("arv-put", "--use-filename", "test.txt", tempfile.Name()).Output()
	if err != nil {
		t.Fatalf("Error running arv-put %s", err)
	}

	uuid := string(output[0:27]) // take the 27-char UUID, dropping the trailing newline
	return uuid
}

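// A manifest line looks like
//   . <hash>+<size>[+<hints>] 0:<size>:test.txt
// so the second space-separated token is the first block locator.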
// Get collection locator
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)

func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
	manifest := getCollection(t, uuid)["manifest_text"].(string)

	locator := strings.Split(manifest, " ")[1]
	match := locatorMatcher.FindStringSubmatch(locator)
	if match == nil {
		t.Fatalf("No locator found in collection manifest %s", manifest)
	}

	return match[1] + "+" + match[2]
}

// switchToken sets the client's API token and returns a function that
// restores the previous token; callers use `defer switchToken(tok)()`.
func switchToken(t string) func() {
	orig := arv.ApiToken
	restore := func() {
		arv.ApiToken = orig
	}
	arv.ApiToken = t
	return restore
}

func getCollection(t *testing.T, uuid string) Dict {
	defer switchToken(arvadostest.AdminToken)()

	getback := make(Dict)
	err := arv.Get("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error getting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Get collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
	return getback
}

func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
	defer switchToken(arvadostest.AdminToken)()

	err := arv.Update("collections", uuid, arvadosclient.Dict{
		"collection": arvadosclient.Dict{
			paramName: paramValue,
		},
	}, &arvadosclient.Dict{})
	if err != nil {
		t.Fatalf("Error updating collection %s", err)
	}
}

// Dict is a generic JSON-style dictionary.
type Dict map[string]interface{}

func deleteCollection(t *testing.T, uuid string) {
	defer switchToken(arvadostest.AdminToken)()

	getback := make(Dict)
	err := arv.Delete("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error deleting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Delete collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
}

func dataManagerSingleRun(t *testing.T) {
	err := singlerun(arv)
	if err != nil {
		t.Fatalf("Error during singlerun %s", err)
	}
}

func getBlockIndexesForServer(t *testing.T, i int) []string {
	var indexes []string

	path := keepServers[i] + "/index"
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Error reading response from %s %s", path, err)
	}

	lines := strings.Split(string(body), "\n")
	for _, line := range lines {
		indexes = append(indexes, strings.Split(line, " ")...)
	}
	return indexes
}

func getBlockIndexes(t *testing.T) [][]string {
	var indexes [][]string

	for i := 0; i < len(keepServers); i++ {
		indexes = append(indexes, getBlockIndexesForServer(t, i))
	}
	return indexes
}

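// verifyBlocks asserts that no block in notExpected appears in any server's
// index, and that every block in expected appears in at least minReplication
// of the server indexes.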
func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
	blocks := getBlockIndexes(t)

	for _, block := range notExpected {
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				t.Fatalf("Found unexpected block %s", block)
			}
		}
	}

	for _, block := range expected {
		nFound := 0
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				nFound++
			}
		}
		if nFound < minReplication {
			t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
		}
	}
}

func valueInArray(value string, list []string) bool {
	for _, v := range list {
		if value == v {
			return true
		}
	}
	return false
}

/*
The test env uses two keep volumes. The volume names can be found by reading
the files ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume.

Each keep volume has the directory structure: volumeN/subdir/locator
*/
func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
	// First get rid of any size hints in the locators
	var trimmedBlockLocators []string
	for _, block := range oldUnusedBlockLocators {
		trimmedBlockLocators = append(trimmedBlockLocators, strings.Split(block, "+")[0])
	}

	// Get the working dir so that we can read keep{n}.volume files
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Error getting working dir %s", err)
	}

	// Now cycle through the two keep volumes
	oldTime := time.Now().AddDate(0, -2, 0)
	for i := 0; i < 2; i++ {
		filename := fmt.Sprintf("%s/../../tmp/keep%d.volume", wd, i)
		volumeDir, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("Error reading keep volume file %s %s", filename, err)
		}

		// Read the keep volume dir structure
		volumeContents, err := ioutil.ReadDir(string(volumeDir))
		if err != nil {
			t.Fatalf("Error reading keep dir %s %s", string(volumeDir), err)
		}

		// Read each subdir of the keep volume dir
		for _, subdir := range volumeContents {
			subdirName := fmt.Sprintf("%s/%s", volumeDir, subdir.Name())
			subdirContents, err := ioutil.ReadDir(subdirName)
			if err != nil {
				t.Fatalf("Error reading keep dir %s %s", subdirName, err)
			}

			// Now we got to the files; their names are the block locators
			for _, fileInfo := range subdirContents {
				blockName := fileInfo.Name()
				myname := fmt.Sprintf("%s/%s", subdirName, blockName)
				if valueInArray(blockName, trimmedBlockLocators) {
					err = os.Chtimes(myname, oldTime, oldTime)
					if err != nil {
						t.Fatalf("Error backdating %s %s", myname, err)
					}
				}
			}
		}
	}
}

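// getStatus fetches a keep server's status.json and decodes it into a
// generic interface{} for ad-hoc inspection.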
func getStatus(t *testing.T, path string) interface{} {
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	var s interface{}
	if err = json.NewDecoder(resp.Body).Decode(&s); err != nil {
		t.Fatalf("Error decoding response from %s %s", path, err)
	}
	return s
}

// Wait until PullQueue and TrashQueue are empty on all keepServers.
func waitUntilQueuesFinishWork(t *testing.T) {
	for _, ks := range keepServers {
		for done := false; !done; {
			time.Sleep(100 * time.Millisecond)
			s := getStatus(t, ks+"/status.json")

			// done only when both queues report no queued or in-progress work
			done = true
			for _, qName := range []string{"PullQueue", "TrashQueue"} {
				qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
				if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) > 0 {
					done = false
				}
			}
		}
	}
}

/*
Create some blocks and backdate some of them.
Also create some collections and delete some of them.
Verify block indexes.
*/
func TestPutAndGetBlocks(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks that will be backdated later on; they are unreferenced
	// and hence should be deleted when datamanager runs.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, oldUnusedBlockLocators[i], fmt.Sprintf("%s%d", oldUnusedBlockData, i))
	}

	// Put a block that will be backdated but also referenced by a collection
	// below; it should not be deleted when datamanager runs.
	oldUsedBlockData := "this collection block will have older mtime"
	oldUsedBlockLocator := putBlock(t, oldUsedBlockData)
	getBlock(t, oldUsedBlockLocator, oldUsedBlockData)

	// Put some more blocks which will not be backdated; hence they are still
	// new and, even though unreferenced, should not be deleted when datamanager runs.
	var newBlockLocators []string
	newBlockData := "this block is newer"
	for i := 0; i < 5; i++ {
		newBlockLocators = append(newBlockLocators, putBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
	}

	// Create a collection that will be deleted later on
	toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)

	// Create another collection that has the same data as one of the old blocks
	oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
	if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
		t.Fatalf("Locator of the collection with the same data as the old block differs: %s", oldUsedBlockCollectionLocator)
	}

	// Create another collection whose replication level will be changed
	replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)

	// Create two collections with the same data; one will be deleted later on
	dataForTwoCollections := "one of these collections will be deleted"
	oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
	secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
	if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
		t.Fatalf("Locators for both these collections expected to be the same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
	}

	// Verify blocks before doing any backdating / deleting.
	var expected []string
	expected = append(expected, oldUnusedBlockLocators...)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, replicationCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, nil, expected, 2)

	// Run datamanager in singlerun mode
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	verifyBlocks(t, nil, expected, 2)

	// Backdate the to-be-old blocks and delete the collections
	backdateBlocks(t, oldUnusedBlockLocators)
	deleteCollection(t, toBeDeletedCollectionUUID)
	deleteCollection(t, secondOfTwoWithSameDataUUID)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Get block indexes and verify that all backdated blocks, except the one
	// used in a collection, are no longer included.
	expected = expected[:0]
	expected = append(expected, oldUsedBlockLocator)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)

	// Reduce desired replication on the replicationCollectionUUID
	// collection, and verify that Data Manager does not reduce
	// actual replication any further than that. (It might not
	// reduce actual replication at all; that's OK for this test.)

	// Reduce desired replication level.
	updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
	collection := getCollection(t, replicationCollectionUUID)
	if collection["replication_desired"] != float64(1) {
		t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
	}

	// Verify data is currently overreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Verify data is not underreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)

	// Verify *other* collections' data is not underreplicated.
	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
}

func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		err := singlerun(arv)
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
	}
}

func TestGetStatusRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		for j := 0; j < 2; j++ {
			s := getStatus(t, keepServers[j]+"/status.json")

			pullQueueStatus := s.(map[string]interface{})["PullQueue"]
			trashQueueStatus := s.(map[string]interface{})["TrashQueue"]

			if pullQueueStatus.(map[string]interface{})["Queued"] == nil ||
				pullQueueStatus.(map[string]interface{})["InProgress"] == nil ||
				trashQueueStatus.(map[string]interface{})["Queued"] == nil ||
				trashQueueStatus.(map[string]interface{})["InProgress"] == nil {
				t.Fatalf("PullQueue and TrashQueue status not found")
			}

			time.Sleep(100 * time.Millisecond)
		}
	}
}

func TestRunDatamanagerWithBogusServer(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiServer = "bogus-server"

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun with bogus server")
	}
}

func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiToken = arvadostest.ActiveToken

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun as non-admin user")
	}
}

func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
	testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
}

func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
	testOldBlocksNotDeletedOnDataManagerError(t, "/badwritetofile", "", true, true)
}

func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
	testOldBlocksNotDeletedOnDataManagerError(t, "", "/badheapprofilefile", true, true)
}

/*
Create some blocks and backdate them.
Run datamanager, optionally producing an error condition.
Verify that the backdated blocks are deleted only when the run succeeded.
*/
func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks and backdate them.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	backdateBlocks(t, oldUnusedBlockLocators)

	// Induce the requested error condition, if any.
	summary.WriteDataTo = writeDataTo
	collection.HeapProfileFilename = heapProfileFile

	err := singlerun(arv)
	if !expectError {
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
	} else if err == nil {
		t.Fatalf("Expected error during datamanager singlerun")
	}
	waitUntilQueuesFinishWork(t)

	// Get block indexes and verify that the backdated blocks were deleted or retained, as expected.
	if expectOldBlocks {
		verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
	} else {
		verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
	}
}