"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- "git.curoverse.com/arvados.git/services/datamanager/keep"
"io/ioutil"
- "log"
"net/http"
"os"
"os/exec"
"time"
)
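+
+// API tokens of the test fixture users used by these tests: a
+// non-admin ("active") user and an admin user.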
+const (
+ ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
+ AdminToken = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
+)
+
var arv arvadosclient.ArvadosClient
var keepClient *keepclient.KeepClient
var keepServers []string
arvadostest.StartAPI()
arvadostest.StartKeep()
- // make arvadosclient
- var err error
- arv, err = arvadosclient.MakeArvadosClient()
- if err != nil {
- t.Fatalf("Error setting up arvados client: %s", err)
- }
+ arv = makeArvadosClient()
// keep client
keepClient = &keepclient.KeepClient{
if err := keepClient.DiscoverKeepServers(); err != nil {
t.Fatalf("Error discovering keep services: %s", err)
}
+ keepServers = []string{}
for _, host := range keepClient.LocalRoots() {
keepServers = append(keepServers, host)
}
// Get collection locator
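+// locatorMatcher captures the hash and size of a Keep locator such as
+// "fa7aeb5140e2848d39b416daeef4ffc5+3+A<hint>", separating them from
+// any trailing hints.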
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)
-func getCollectionLocator(t *testing.T, uuid string) string {
+func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
	manifest := getCollection(t, uuid)["manifest_text"].(string)
	locator := strings.Split(manifest, " ")[1]
	// Strip any hints, returning only the "hash+size" part.
	match := locatorMatcher.FindStringSubmatch(locator)
	if match == nil {
		t.Fatalf("No locator found in collection manifest %s", manifest)
	}
	return match[1] + "+" + match[2]
}
func dataManagerSingleRun(t *testing.T) {
- err := singlerun()
+ err := singlerun(arv)
if err != nil {
t.Fatalf("Error during singlerun %s", err)
}
path := keepServers[i] + "/index"
client := http.Client{}
req, err := http.NewRequest("GET", path, nil)
- req.Header.Add("Authorization", "OAuth2 "+keep.GetDataManagerToken(nil))
+ req.Header.Add("Authorization", "OAuth2 "+AdminToken)
req.Header.Add("Content-Type", "application/octet-stream")
resp, err := client.Do(req)
-	defer resp.Body.Close()
+	if err != nil {
+		t.Fatalf("Error during %s %s", path, err)
+	}
+	defer resp.Body.Close()
return indexes
}
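+
+// verifyBlocks asserts that none of the notExpected blocks appears in
+// any keep server index, and that each expected block appears in at
+// least minReplication server indexes.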
-func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
+func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
blocks := getBlockIndexes(t)
+
for _, block := range notExpected {
- for i := 0; i < len(blocks); i++ {
- exists := valueInArray(block, blocks[i])
- if exists {
- t.Fatalf("Found unexpected block in index %s", block)
+ for _, idx := range blocks {
+ if valueInArray(block, idx) {
+ t.Fatalf("Found unexpected block %s", block)
}
}
}
- // var blockExists [][]string
- blockExists := make(map[string][]string)
for _, block := range expected {
- var blockArray []string
- for i := 0; i < len(blocks); i++ {
- exists := valueInArray(block, blocks[i])
- if exists {
- blockArray = append(blockArray, block)
+ nFound := 0
+ for _, idx := range blocks {
+ if valueInArray(block, idx) {
+ nFound++
}
}
- blockExists[block] = blockArray
- }
-
- for _, block := range expected {
- if blockExists[block] == nil || len(blockExists[block]) != 2 {
- t.Fatalf("Expected to find two replicas for block %s; found %d", block, len(blockExists[block]))
+ if nFound < minReplication {
+ t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
}
}
}
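+
+// getStatus fetches a status page from a keep server and decodes the
+// JSON response body.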
func getStatus(t *testing.T, path string) interface{} {
client := http.Client{}
req, err := http.NewRequest("GET", path, nil)
- req.Header.Add("Authorization", "OAuth2 "+keep.GetDataManagerToken(nil))
+ req.Header.Add("Authorization", "OAuth2 "+AdminToken)
req.Header.Add("Content-Type", "application/octet-stream")
resp, err := client.Do(req)
- defer resp.Body.Close()
-
if err != nil {
t.Fatalf("Error during %s %s", path, err)
}
+ defer resp.Body.Close()
var s interface{}
-	json.NewDecoder(resp.Body).Decode(&s)
+	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
+		t.Fatalf("Error decoding status from %s %s", path, err)
+	}
return s
}
+// Wait until PullQueue and TrashQueue are empty on all keepServers.
func waitUntilQueuesFinishWork(t *testing.T) {
- // Wait until PullQueue and TrashQueue finish their work
- for {
- var done [2]bool
- for i := 0; i < 2; i++ {
- s := getStatus(t, keepServers[i]+"/status.json")
- var pullQueueStatus interface{}
- pullQueueStatus = s.(map[string]interface{})["PullQueue"]
- var trashQueueStatus interface{}
- trashQueueStatus = s.(map[string]interface{})["TrashQueue"]
-
- if pullQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
- pullQueueStatus.(map[string]interface{})["InProgress"] == float64(0) &&
- trashQueueStatus.(map[string]interface{})["Queued"] == float64(0) &&
- trashQueueStatus.(map[string]interface{})["InProgress"] == float64(0) {
- done[i] = true
- }
- }
- if done[0] && done[1] {
- break
- } else {
+ for _, ks := range keepServers {
+ for done := false; !done; {
time.Sleep(100 * time.Millisecond)
+			s := getStatus(t, ks+"/status.json")
+			// Assume both queues are idle; any queued or
+			// in-progress work clears the flag so we keep polling.
+			done = true
+			for _, qName := range []string{"PullQueue", "TrashQueue"} {
+				qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
+				if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) > 0 {
+					done = false
+				}
+			}
}
}
}
Verify block indexes.
*/
func TestPutAndGetBlocks(t *testing.T) {
- log.Print("TestPutAndGetBlocks start")
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
}
	// Create a collection that will be deleted later on
- toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
- toBeDeletedCollectionLocator := getCollectionLocator(t, toBeDeletedCollectionUuid)
+ toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
+ toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)
	// Create another collection that has the same data as one of the old blocks
- oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
- oldUsedBlockCollectionLocator := getCollectionLocator(t, oldUsedBlockCollectionUuid)
+ oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
+ oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
}
// Create another collection whose replication level will be changed
- replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
- replicationCollectionLocator := getCollectionLocator(t, replicationCollectionUuid)
+ replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
+ replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)
	// Create two collections with the same data; one will be deleted later on
dataForTwoCollections := "one of these collections will be deleted"
- oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- oneOfTwoWithSameDataLocator := getCollectionLocator(t, oneOfTwoWithSameDataUuid)
- secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- secondOfTwoWithSameDataLocator := getCollectionLocator(t, secondOfTwoWithSameDataUuid)
+ oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+ oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
+ secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+ secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
}
expected = append(expected, oneOfTwoWithSameDataLocator)
expected = append(expected, secondOfTwoWithSameDataLocator)
- verifyBlocks(t, nil, expected)
+ verifyBlocks(t, nil, expected, 2)
// Run datamanager in singlerun mode
dataManagerSingleRun(t)
waitUntilQueuesFinishWork(t)
- verifyBlocks(t, nil, expected)
+ verifyBlocks(t, nil, expected, 2)
	// Backdate the soon-to-be-old blocks and delete the collections
backdateBlocks(t, oldUnusedBlockLocators)
- deleteCollection(t, toBeDeletedCollectionUuid)
- deleteCollection(t, secondOfTwoWithSameDataUuid)
+ deleteCollection(t, toBeDeletedCollectionUUID)
+ deleteCollection(t, secondOfTwoWithSameDataUUID)
// Run data manager again
dataManagerSingleRun(t)
expected = append(expected, oldUsedBlockLocator)
expected = append(expected, newBlockLocators...)
expected = append(expected, toBeDeletedCollectionLocator)
- expected = append(expected, replicationCollectionLocator)
expected = append(expected, oneOfTwoWithSameDataLocator)
expected = append(expected, secondOfTwoWithSameDataLocator)
- verifyBlocks(t, oldUnusedBlockLocators, expected)
+ verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
- // Reduce replication on replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.
+ // Reduce desired replication on replicationCollectionUUID
+ // collection, and verify that Data Manager does not reduce
+ // actual replication any further than that. (It might not
+ // reduce actual replication at all; that's OK for this test.)
- // Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
- for i := 0; i < len(keepServers); i++ {
- indexes := getBlockIndexesForServer(t, i)
- if !valueInArray(replicationCollectionLocator, indexes) {
- t.Fatalf("Not found block in index %s", replicationCollectionLocator)
- }
- }
-
- // Now reduce replication level on this collection and verify that it still appears in both volumes
- updateCollection(t, replicationCollectionUuid, "replication_desired", "1")
- collection := getCollection(t, replicationCollectionUuid)
+ // Reduce desired replication level.
+ updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
+ collection := getCollection(t, replicationCollectionUUID)
-	if collection["replication_desired"].(interface{}) != float64(1) {
+	if collection["replication_desired"].(float64) != 1 {
t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
}
+ // Verify data is currently overreplicated.
+ verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)
+
// Run data manager again
- time.Sleep(100 * time.Millisecond)
dataManagerSingleRun(t)
waitUntilQueuesFinishWork(t)
- for i := 0; i < len(keepServers); i++ {
- indexes := getBlockIndexesForServer(t, i)
- if !valueInArray(replicationCollectionLocator, indexes) {
- t.Fatalf("Not found block in index %s", replicationCollectionLocator)
- }
- }
- // Done testing reduce replication on collection
+ // Verify data is not underreplicated.
+ verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)
- // Verify blocks one more time
- verifyBlocks(t, oldUnusedBlockLocators, expected)
+ // Verify *other* collections' data is not underreplicated.
+ verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
}
-func _TestDatamanagerSingleRunRepeatedly(t *testing.T) {
- log.Print("TestDatamanagerSingleRunRepeatedly start")
-
+func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
for i := 0; i < 10; i++ {
- err := singlerun()
+ err := singlerun(arv)
if err != nil {
t.Fatalf("Got an error during datamanager singlerun: %v", err)
}
- time.Sleep(100 * time.Millisecond)
}
}
-func _TestGetStatusRepeatedly(t *testing.T) {
- log.Print("TestGetStatusRepeatedly start")
-
+func TestGetStatusRepeatedly(t *testing.T) {
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
}
}
}
+
+func TestRunDatamanagerWithBogusServer(t *testing.T) {
+ defer TearDownDataManagerTest(t)
+ SetupDataManagerTest(t)
+
+ arv.ApiServer = "bogus-server"
+
+ err := singlerun(arv)
+ if err == nil {
+ t.Fatalf("Expected error during singlerun with bogus server")
+ }
+}
+
+func TestRunDatamanagerAsNonAdminUser(t *testing.T) {
+ defer TearDownDataManagerTest(t)
+ SetupDataManagerTest(t)
+
+ arv.ApiToken = ActiveUserToken
+
+ err := singlerun(arv)
+ if err == nil {
+ t.Fatalf("Expected error during singlerun as non-admin user")
+ }
+}