6 "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
7 "git.curoverse.com/arvados.git/sdk/go/arvadostest"
8 "git.curoverse.com/arvados.git/sdk/go/keepclient"
	ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
	AdminToken      = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
var arv arvadosclient.ArvadosClient
var keepClient *keepclient.KeepClient
var keepServers []string
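// SetupDataManagerTest starts the test API and Keep servers, then initializes
// the arvados client, the keep client, and the list of local keep server roots.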
func SetupDataManagerTest(t *testing.T) {
	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")

	// start api and keep servers
	arvadostest.ResetEnv()
	arvadostest.StartAPI()
	arvadostest.StartKeep()

	arv = makeArvadosClient()

	// make a keep client pointed at the test keep servers
	keepClient = &keepclient.KeepClient{
		Arvados:       &arv,
		Want_replicas: 2,
		Client:        &http.Client{},
	}

	// discover keep services
	if err := keepClient.DiscoverKeepServers(); err != nil {
		t.Fatalf("Error discovering keep services: %s", err)
	}
	for _, host := range keepClient.LocalRoots() {
		keepServers = append(keepServers, host)
	}
}
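// TearDownDataManagerTest shuts down the test servers started by SetupDataManagerTest.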
func TearDownDataManagerTest(t *testing.T) {
	arvadostest.StopKeep()
	arvadostest.StopAPI()
}
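// putBlock writes the given data to Keep and returns its locator,
// trimmed down to the "hash+size" form.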
func putBlock(t *testing.T, data string) string {
	locator, _, err := keepClient.PutB([]byte(data))
	if err != nil {
		t.Fatalf("Error putting test data for %s %s %v", data, locator, err)
	}
	if locator == "" {
		t.Fatalf("No locator found after putting test data")
	}

	splits := strings.Split(locator, "+")
	return splits[0] + "+" + splits[1]
}
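// getBlock reads the block at the given locator back from Keep and fails the
// test if its length or contents do not match the given data.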
func getBlock(t *testing.T, locator string, data string) {
	reader, blocklen, _, err := keepClient.Get(locator)
	if err != nil {
		t.Fatalf("Error getting test data in setup for %s %s %v", data, locator, err)
	}
	if reader == nil {
		t.Fatalf("No reader found after putting test data")
	}
	if blocklen != int64(len(data)) {
		t.Fatalf("blocklen %d did not match data len %d", blocklen, len(data))
	}

	all, err := ioutil.ReadAll(reader)
	if err != nil || string(all) != data {
		t.Fatalf("Data read %s did not match expected data %s (read error: %v)", string(all), data, err)
	}
}
// Create a collection using arv-put
func createCollection(t *testing.T, data string) string {
	tempfile, err := ioutil.TempFile(os.TempDir(), "temp-test-file")
	if err != nil {
		t.Fatalf("Error creating tempfile %v", err)
	}
	defer os.Remove(tempfile.Name())
	_, err = tempfile.Write([]byte(data))
	if err != nil {
		t.Fatalf("Error writing to tempfile %v", err)
	}

	output, err := exec.Command("arv-put", "--use-filename", "test.txt", tempfile.Name()).Output()
	if err != nil {
		t.Fatalf("Error running arv-put %s", err)
	}

	uuid := string(output[0:27]) // trim the trailing newline to get the uuid
	return uuid
}
// Get the first block locator from a collection's manifest
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)

func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
	manifest := getCollection(t, uuid)["manifest_text"].(string)

	locator := strings.Split(manifest, " ")[1]
	match := locatorMatcher.FindStringSubmatch(locator)
	if match == nil {
		t.Fatalf("No locator found in collection manifest %s", manifest)
	}

	return match[1] + "+" + match[2]
}
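// getCollection fetches the collection with the given uuid and fails the test
// if the returned record does not match that uuid.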
func getCollection(t *testing.T, uuid string) Dict {
	getback := make(Dict)
	err := arv.Get("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error getting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Get collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
	return getback
}
func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
	err := arv.Update("collections", uuid, arvadosclient.Dict{
		"collection": arvadosclient.Dict{
			paramName: paramValue,
		},
	}, &arvadosclient.Dict{})
	if err != nil {
		t.Fatalf("Error updating collection %s", err)
	}
}
type Dict map[string]interface{}
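// deleteCollection deletes the collection with the given uuid and fails the
// test if the returned record does not match that uuid.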
func deleteCollection(t *testing.T, uuid string) {
	getback := make(Dict)
	err := arv.Delete("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error deleting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Delete collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
}
func dataManagerSingleRun(t *testing.T) {
	err := singlerun(arv)
	if err != nil {
		t.Fatalf("Error during singlerun %s", err)
	}
}
func getBlockIndexesForServer(t *testing.T, i int) []string {
	var indexes []string

	path := keepServers[i] + "/index"
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+AdminToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Error reading response from %s %s", path, err)
	}

	lines := strings.Split(string(body), "\n")
	for _, line := range lines {
		indexes = append(indexes, strings.Split(line, " ")...)
	}
	return indexes
}
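// getBlockIndexes collects the index listings from all keep servers.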
func getBlockIndexes(t *testing.T) [][]string {
	var indexes [][]string
	for i := 0; i < len(keepServers); i++ {
		indexes = append(indexes, getBlockIndexesForServer(t, i))
	}
	return indexes
}
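// verifyBlocks fails the test if any of the notExpected blocks appear in any
// keep server index, or if any of the expected blocks is found on fewer than
// two keep servers.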
func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
	blocks := getBlockIndexes(t)

	for _, block := range notExpected {
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				t.Fatalf("Found unexpected block %s", block)
			}
		}
	}

	for _, block := range expected {
		nFound := 0
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				nFound++
			}
		}
		if nFound < 2 {
			t.Fatalf("Found %d replicas of block %s, expected >= 2", nFound, block)
		}
	}
}
func valueInArray(value string, list []string) bool {
	for _, v := range list {
		if value == v {
			return true
		}
	}
	return false
}
/*
The test environment uses two keep volumes. The volume names can be found by
reading the files ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume.

The keep volumes have the directory structure: volumeN/subdir/locator
*/
func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
	// First get rid of any size hints in the locators
	var trimmedBlockLocators []string
	for _, block := range oldUnusedBlockLocators {
		trimmedBlockLocators = append(trimmedBlockLocators, strings.Split(block, "+")[0])
	}

	// Get the working dir so that we can read keep{n}.volume files
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Error getting working dir %s", err)
	}

	// Now cycle through the two keep volumes
	oldTime := time.Now().AddDate(0, -2, 0)
	for i := 0; i < 2; i++ {
		filename := fmt.Sprintf("%s/../../tmp/keep%d.volume", wd, i)
		volumeDir, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("Error reading keep volume file %s %s", filename, err)
		}

		// Read the keep volume dir structure
		volumeContents, err := ioutil.ReadDir(string(volumeDir))
		if err != nil {
			t.Fatalf("Error reading keep dir %s %s", string(volumeDir), err)
		}

		// Read each subdir of the keep volume dir
		for _, subdir := range volumeContents {
			subdirName := fmt.Sprintf("%s/%s", volumeDir, subdir.Name())
			subdirContents, err := ioutil.ReadDir(string(subdirName))
			if err != nil {
				t.Fatalf("Error reading keep dir %s %s", string(subdirName), err)
			}

			// The file names at this level are the block locators;
			// backdate the mtime of the blocks we want to look old.
			for _, fileInfo := range subdirContents {
				blockName := fileInfo.Name()
				myname := fmt.Sprintf("%s/%s", subdirName, blockName)
				if valueInArray(blockName, trimmedBlockLocators) {
					err = os.Chtimes(myname, oldTime, oldTime)
				}
			}
		}
	}
}
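// getStatus fetches and decodes the JSON status document at the given path.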
func getStatus(t *testing.T, path string) interface{} {
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+AdminToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	var s interface{}
	json.NewDecoder(resp.Body).Decode(&s)
	return s
}
func waitUntilQueuesFinishWork(t *testing.T) {
	// Wait until PullQueue and TrashQueue finish their work
	var done [2]bool
	for {
		for i := 0; i < 2; i++ {
			s := getStatus(t, keepServers[i]+"/status.json")
			pullQueueStatus := s.(map[string]interface{})["PullQueue"]
			trashQueueStatus := s.(map[string]interface{})["TrashQueue"]

			if pullQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
				pullQueueStatus.(map[string]interface{})["InProgress"] == float64(0) &&
				trashQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
				trashQueueStatus.(map[string]interface{})["InProgress"] == float64(0) {
				done[i] = true
			}
		}
		if done[0] && done[1] {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
}
/*
Create some blocks and backdate some of them.
Also create some collections and delete some of them.
Verify block indexes.
*/
func TestPutAndGetBlocks(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks which will be backdated later on.
	// These will be old and unreferenced and hence should be deleted when datamanager runs.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, oldUnusedBlockLocators[i], fmt.Sprintf("%s%d", oldUnusedBlockData, i))
	}

	// Put another block which will also be backdated, but is referenced by a collection
	// created below, and hence should not be deleted when datamanager runs.
	oldUsedBlockData := "this collection block will have older mtime"
	oldUsedBlockLocator := putBlock(t, oldUsedBlockData)
	getBlock(t, oldUsedBlockLocator, oldUsedBlockData)
	// Put some more blocks which will not be backdated; they are still new and, even
	// though unreferenced by any collection, should not be deleted when datamanager runs.
	var newBlockLocators []string
	newBlockData := "this block is newer"
	for i := 0; i < 5; i++ {
		newBlockLocators = append(newBlockLocators, putBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
	}

	// Create a collection that will be deleted later on
	toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUuid)

	// Create another collection that has the same data as one of the old blocks
	oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUuid)
	if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
		t.Fatalf("Locator of the collection with the same data as old block is different: %s", oldUsedBlockCollectionLocator)
	}
	// Create another collection whose replication level will be changed
	replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUuid)

	// Create two collections with the same data; one will be deleted later on
	dataForTwoCollections := "one of these collections will be deleted"
	oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUuid)
	secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUuid)
	if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
		t.Fatalf("Locators for both these collections expected to be the same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
	}

	// Verify blocks before doing any backdating / deleting.
	var expected []string
	expected = append(expected, oldUnusedBlockLocators...)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, replicationCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, nil, expected)
	// Run datamanager in singlerun mode
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	verifyBlocks(t, nil, expected)

	// Backdate the to-be-old blocks and delete the collections
	backdateBlocks(t, oldUnusedBlockLocators)
	deleteCollection(t, toBeDeletedCollectionUuid)
	deleteCollection(t, secondOfTwoWithSameDataUuid)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Get block indexes and verify that all backdated blocks, except the one
	// referenced by a collection, are no longer included.
	expected = expected[:0]
	expected = append(expected, oldUsedBlockLocator)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, replicationCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, oldUnusedBlockLocators, expected)
	// Reduce replication on the replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.

	// Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
	for i := 0; i < len(keepServers); i++ {
		indexes := getBlockIndexesForServer(t, i)
		if !valueInArray(replicationCollectionLocator, indexes) {
			t.Fatalf("Block %s not found in index", replicationCollectionLocator)
		}
	}

	// Now reduce replication level on this collection and verify that it still appears in both volumes
	updateCollection(t, replicationCollectionUuid, "replication_desired", "1")
	collection := getCollection(t, replicationCollectionUuid)
	if collection["replication_desired"] != float64(1) {
		t.Fatalf("After update, replication_desired is not 1; instead it is %v", collection["replication_desired"])
	}

	// Run data manager again
	time.Sleep(100 * time.Millisecond)
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	for i := 0; i < len(keepServers); i++ {
		indexes := getBlockIndexesForServer(t, i)
		if !valueInArray(replicationCollectionLocator, indexes) {
			t.Fatalf("Block %s not found in index", replicationCollectionLocator)
		}
	}
	// Done testing reduce replication on collection

	// Verify blocks one more time
	verifyBlocks(t, oldUnusedBlockLocators, expected)
}
func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		err := singlerun(arv)
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
func TestGetStatusRepeatedly(t *testing.T) {
	t.Skip("This test still fails. Skip it until it is fixed.")

	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		for j := 0; j < 2; j++ {
			s := getStatus(t, keepServers[j]+"/status.json")

			pullQueueStatus := s.(map[string]interface{})["PullQueue"]
			trashQueueStatus := s.(map[string]interface{})["TrashQueue"]

			if pullQueueStatus.(map[string]interface{})["Queued"] == nil ||
				pullQueueStatus.(map[string]interface{})["InProgress"] == nil ||
				trashQueueStatus.(map[string]interface{})["Queued"] == nil ||
				trashQueueStatus.(map[string]interface{})["InProgress"] == nil {
				t.Fatalf("PullQueue and TrashQueue status not found")
			}

			time.Sleep(100 * time.Millisecond)
		}
	}
}
func TestRunDatamanagerWithBogusServer(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiServer = "bogus-server"

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun with bogus server")
	}
}
func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiToken = ActiveUserToken

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun as non-admin user")
	}
}