package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"regexp"
	"strings"
	"testing"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/arvadostest"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
)

const (
	ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
	AdminToken      = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
)

var arv arvadosclient.ArvadosClient
var keepClient *keepclient.KeepClient
var keepServers []string
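
// SetupDataManagerTest starts the test API server and two keep servers,
// builds a keep client, and discovers the local keep services.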
func SetupDataManagerTest(t *testing.T) {
	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")

	// start api and keep servers
	arvadostest.ResetEnv()
	arvadostest.StartAPI()
	arvadostest.StartKeep(2, false)

	arv = makeArvadosClient()

	// make a keep client with the test replication level
	keepClient = &keepclient.KeepClient{
		Arvados:       &arv,
		Want_replicas: 2,
		Client:        &http.Client{},
	}

	// discover keep services
	if err := keepClient.DiscoverKeepServers(); err != nil {
		t.Fatalf("Error discovering keep services: %s", err)
	}
	keepServers = []string{}
	for _, host := range keepClient.LocalRoots() {
		keepServers = append(keepServers, host)
	}
}

func TearDownDataManagerTest(t *testing.T) {
	arvadostest.StopKeep(2)
	arvadostest.StopAPI()
}
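
// putBlock writes data to keep and returns the md5+size portion of the
// resulting locator.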
func putBlock(t *testing.T, data string) string {
	locator, _, err := keepClient.PutB([]byte(data))
	if err != nil {
		t.Fatalf("Error putting test data for %s %s %v", data, locator, err)
	}
	if locator == "" {
		t.Fatalf("No locator found after putting test data")
	}

	splits := strings.Split(locator, "+")
	return splits[0] + "+" + splits[1]
}
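
// getBlock reads the block at locator from keep and verifies that its
// contents match data.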
func getBlock(t *testing.T, locator string, data string) {
	reader, blocklen, _, err := keepClient.Get(locator)
	if err != nil {
		t.Fatalf("Error getting test data in setup for %s %s %v", data, locator, err)
	}
	if reader == nil {
		t.Fatalf("No reader found after putting test data")
	}
	if blocklen != int64(len(data)) {
		t.Fatalf("blocklen %d did not match data len %d", blocklen, len(data))
	}

	all, err := ioutil.ReadAll(reader)
	if string(all) != data {
		t.Fatalf("Data read %s did not match expected data %s", string(all), data)
	}
}

// Create a collection using arv-put
func createCollection(t *testing.T, data string) string {
	tempfile, err := ioutil.TempFile(os.TempDir(), "temp-test-file")
	if err != nil {
		t.Fatalf("Error creating tempfile %v", err)
	}
	defer os.Remove(tempfile.Name())

	_, err = tempfile.Write([]byte(data))
	if err != nil {
		t.Fatalf("Error writing to tempfile %v", err)
	}

	output, err := exec.Command("arv-put", "--use-filename", "test.txt", tempfile.Name()).Output()
	if err != nil {
		t.Fatalf("Error running arv-put %s", err)
	}

	uuid := string(output[0:27]) // trim the trailing newline
	return uuid
}

// Get the first block locator from a collection's manifest
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)

func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
	manifest := getCollection(t, uuid)["manifest_text"].(string)

	locator := strings.Split(manifest, " ")[1]
	match := locatorMatcher.FindStringSubmatch(locator)
	if match == nil {
		t.Fatalf("No locator found in collection manifest %s", manifest)
	}

	return match[1] + "+" + match[2]
}
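
// getCollection fetches the collection with the given uuid via the API
// and verifies the uuid in the response.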
func getCollection(t *testing.T, uuid string) Dict {
	getback := make(Dict)
	err := arv.Get("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error getting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Get collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}

	return getback
}
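
// updateCollection sets a single attribute on the collection with the
// given uuid via the API.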
func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
	err := arv.Update("collections", uuid, arvadosclient.Dict{
		"collection": arvadosclient.Dict{
			paramName: paramValue,
		},
	}, &arvadosclient.Dict{})
	if err != nil {
		t.Fatalf("Error updating collection %s", err)
	}
}
type Dict map[string]interface{}
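
// deleteCollection deletes the collection with the given uuid via the API
// and verifies the uuid in the response.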
func deleteCollection(t *testing.T, uuid string) {
	getback := make(Dict)
	err := arv.Delete("collections", uuid, nil, &getback)
	if err != nil {
		t.Fatalf("Error deleting collection %s", err)
	}
	if getback["uuid"] != uuid {
		t.Fatalf("Delete collection uuid did not match original: %s, result: %s", uuid, getback["uuid"])
	}
}
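
// dataManagerSingleRun runs datamanager once and fails the test on error.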
func dataManagerSingleRun(t *testing.T) {
	err := singlerun(arv)
	if err != nil {
		t.Fatalf("Error during singlerun %s", err)
	}
}
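
// getBlockIndexesForServer fetches the /index listing from keep server i
// and returns the block locators it reports.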
func getBlockIndexesForServer(t *testing.T, i int) []string {
	var indexes []string

	path := keepServers[i] + "/index"
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+AdminToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Error reading response from %s %s", path, err)
	}

	lines := strings.Split(string(body), "\n")
	for _, line := range lines {
		indexes = append(indexes, strings.Split(line, " ")...)
	}

	return indexes
}
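
// getBlockIndexes collects the index from every keep server.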
func getBlockIndexes(t *testing.T) [][]string {
	var indexes [][]string

	for i := 0; i < len(keepServers); i++ {
		indexes = append(indexes, getBlockIndexesForServer(t, i))
	}
	return indexes
}
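
// verifyBlocks checks that none of the notExpected blocks appear in any
// keep server index, and that every expected block appears on at least
// minReplication servers.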
func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
	blocks := getBlockIndexes(t)

	for _, block := range notExpected {
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				t.Fatalf("Found unexpected block %s", block)
			}
		}
	}

	for _, block := range expected {
		nFound := 0
		for _, idx := range blocks {
			if valueInArray(block, idx) {
				nFound++
			}
		}
		if nFound < minReplication {
			t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
		}
	}
}
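
// valueInArray reports whether value occurs in list.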
func valueInArray(value string, list []string) bool {
	for _, v := range list {
		if value == v {
			return true
		}
	}
	return false
}

/*
The test environment uses two keep volumes. The volume names can be found by
reading the files ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume.

The keep volumes use the directory structure:
  volumeN/subdir/locator
*/
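// backdateBlocks sets the mtime of every on-disk block file whose locator
// appears in oldUnusedBlockLocators to two months in the past, so that
// datamanager will consider those blocks old enough to delete.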
func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
	// First get rid of any size hints in the locators
	var trimmedBlockLocators []string
	for _, block := range oldUnusedBlockLocators {
		trimmedBlockLocators = append(trimmedBlockLocators, strings.Split(block, "+")[0])
	}

	// Get the working dir so that we can read keep{n}.volume files
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Error getting working dir %s", err)
	}

	// Now cycle through the two keep volumes
	oldTime := time.Now().AddDate(0, -2, 0)
	for i := 0; i < 2; i++ {
		filename := fmt.Sprintf("%s/../../tmp/keep%d.volume", wd, i)
		volumeDir, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Fatalf("Error reading keep volume file %s %s", filename, err)
		}

		// Read the keep volume dir structure
		volumeContents, err := ioutil.ReadDir(string(volumeDir))
		if err != nil {
			t.Fatalf("Error reading keep dir %s %s", string(volumeDir), err)
		}

		// Read each subdir of the keep volume dir
		for _, subdir := range volumeContents {
			subdirName := fmt.Sprintf("%s/%s", volumeDir, subdir.Name())
			subdirContents, err := ioutil.ReadDir(subdirName)
			if err != nil {
				t.Fatalf("Error reading keep dir %s %s", subdirName, err)
			}

			// The file names at this level are the block locators
			for _, fileInfo := range subdirContents {
				blockName := fileInfo.Name()
				myname := fmt.Sprintf("%s/%s", subdirName, blockName)
				if valueInArray(blockName, trimmedBlockLocators) {
					err = os.Chtimes(myname, oldTime, oldTime)
				}
			}
		}
	}
}
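
// getStatus fetches the given status URL from a keep server and decodes
// the JSON response.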
func getStatus(t *testing.T, path string) interface{} {
	client := http.Client{}
	req, err := http.NewRequest("GET", path, nil)
	req.Header.Add("Authorization", "OAuth2 "+AdminToken)
	req.Header.Add("Content-Type", "application/octet-stream")
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Error during %s %s", path, err)
	}
	defer resp.Body.Close()

	var s interface{}
	json.NewDecoder(resp.Body).Decode(&s)

	return s
}

// Wait until PullQueue and TrashQueue are empty on all keepServers.
func waitUntilQueuesFinishWork(t *testing.T) {
	for _, ks := range keepServers {
		for done := false; !done; {
			time.Sleep(100 * time.Millisecond)
			s := getStatus(t, ks+"/status.json")
			done = true
			for _, qName := range []string{"PullQueue", "TrashQueue"} {
				qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
				if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) != 0 {
					// keep waiting until both queues are idle
					done = false
				}
			}
		}
	}
}

/*
Create some blocks and backdate some of them.
Also create some collections and delete some of them.
Verify block indexes.
*/
func TestPutAndGetBlocks(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	// Put some blocks which will be backdated later on.
	// They are not referenced by any collection, so datamanager should
	// delete them once they are old.
	var oldUnusedBlockLocators []string
	oldUnusedBlockData := "this block will have older mtime"
	for i := 0; i < 5; i++ {
		oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, oldUnusedBlockLocators[i], fmt.Sprintf("%s%d", oldUnusedBlockData, i))
	}

	// Put a block that will be backdated later on but that is also
	// referenced by a collection below; hence datamanager should not delete it.
	oldUsedBlockData := "this collection block will have older mtime"
	oldUsedBlockLocator := putBlock(t, oldUsedBlockData)
	getBlock(t, oldUsedBlockLocator, oldUsedBlockData)

	// Put some more blocks which will not be backdated; they are still new
	// and, even though unreferenced, should not be deleted when datamanager runs.
	var newBlockLocators []string
	newBlockData := "this block is newer"
	for i := 0; i < 5; i++ {
		newBlockLocators = append(newBlockLocators, putBlock(t, fmt.Sprintf("%s%d", newBlockData, i)))
	}
	for i := 0; i < 5; i++ {
		getBlock(t, newBlockLocators[i], fmt.Sprintf("%s%d", newBlockData, i))
	}

	// Create a collection that will be deleted later on
	toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
	toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)

	// Create another collection that has the same data as one of the old blocks
	oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
	oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
	if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
		t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
	}

	// Create another collection whose desired replication level will be reduced later
	replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
	replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)

	// Create two collections with the same data; one will be deleted later on
	dataForTwoCollections := "one of these collections will be deleted"
	oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
	secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
	secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
	if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
		t.Fatalf("Locators for both these collections expected to be the same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
	}

	// Verify blocks before doing any backdating / deleting.
	var expected []string
	expected = append(expected, oldUnusedBlockLocators...)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, replicationCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, nil, expected, 2)

	// Run datamanager in singlerun mode
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	verifyBlocks(t, nil, expected, 2)

	// Backdate the to-be old blocks and delete the collections
	backdateBlocks(t, oldUnusedBlockLocators)
	deleteCollection(t, toBeDeletedCollectionUUID)
	deleteCollection(t, secondOfTwoWithSameDataUUID)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Get block indexes and verify that all backdated blocks, except the one
	// referenced by a collection, are gone, while everything else remains.
	expected = expected[:0]
	expected = append(expected, oldUsedBlockLocator)
	expected = append(expected, newBlockLocators...)
	expected = append(expected, toBeDeletedCollectionLocator)
	expected = append(expected, oneOfTwoWithSameDataLocator)
	expected = append(expected, secondOfTwoWithSameDataLocator)

	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)

	// Reduce desired replication on the replicationCollectionUUID
	// collection, and verify that Data Manager does not reduce
	// actual replication any further than that. (It might not
	// reduce actual replication at all; that's OK for this test.)

	// Reduce desired replication level.
	updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
	collection := getCollection(t, replicationCollectionUUID)
	if collection["replication_desired"] != float64(1) {
		t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
	}

	// Verify data is currently overreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)

	// Run data manager again
	dataManagerSingleRun(t)
	waitUntilQueuesFinishWork(t)

	// Verify data is not underreplicated.
	verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)

	// Verify *other* collections' data is not underreplicated.
	verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
}
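
// TestDatamanagerSingleRunRepeatedly verifies that datamanager can be run
// many times in a row without errors.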
func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		err := singlerun(arv)
		if err != nil {
			t.Fatalf("Got an error during datamanager singlerun: %v", err)
		}
	}
}
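
// TestGetStatusRepeatedly polls each keep server's status endpoint and
// verifies that the pull and trash queue counters are always present.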
func TestGetStatusRepeatedly(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	for i := 0; i < 10; i++ {
		for j := 0; j < 2; j++ {
			s := getStatus(t, keepServers[j]+"/status.json")

			pullQueueStatus := s.(map[string]interface{})["PullQueue"]
			trashQueueStatus := s.(map[string]interface{})["TrashQueue"]

			if pullQueueStatus.(map[string]interface{})["Queued"] == nil ||
				pullQueueStatus.(map[string]interface{})["InProgress"] == nil ||
				trashQueueStatus.(map[string]interface{})["Queued"] == nil ||
				trashQueueStatus.(map[string]interface{})["InProgress"] == nil {
				t.Fatalf("PullQueue and TrashQueue status not found")
			}

			time.Sleep(100 * time.Millisecond)
		}
	}
}
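
// TestRunDatamanagerWithBogusServer verifies that singlerun fails when the
// API server address is invalid.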
func TestRunDatamanagerWithBogusServer(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiServer = "bogus-server"

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun with bogus server")
	}
}
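
// TestRunDatamanagerAsNonAdminUser verifies that singlerun fails when run
// with a non-admin API token.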
func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
	defer TearDownDataManagerTest(t)
	SetupDataManagerTest(t)

	arv.ApiToken = ActiveUserToken

	err := singlerun(arv)
	if err == nil {
		t.Fatalf("Expected error during singlerun as non-admin user")
	}
}