"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "git.curoverse.com/arvados.git/services/datamanager/collection"
+ "git.curoverse.com/arvados.git/services/datamanager/summary"
"io/ioutil"
"net/http"
"os"
"time"
)
-const (
- ActiveUserToken = "3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi"
- AdminToken = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"
-)
-
var arv arvadosclient.ArvadosClient
var keepClient *keepclient.KeepClient
var keepServers []string
// start api and keep servers
arvadostest.ResetEnv()
arvadostest.StartAPI()
- arvadostest.StartKeep()
+ arvadostest.StartKeep(2, false)
- arv = makeArvadosClient()
+ var err error
+ arv, err = arvadosclient.MakeArvadosClient()
+ if err != nil {
+ t.Fatalf("Error making arvados client: %s", err)
+ }
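+ // Use the privileged data manager token; keepstore index and trash requests require it.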
+ arv.ApiToken = arvadostest.DataManagerToken
// keep client
keepClient = &keepclient.KeepClient{
Arvados: &arv,
Want_replicas: 2,
- Using_proxy: true,
Client: &http.Client{},
}
// discover keep services
- if err := keepClient.DiscoverKeepServers(); err != nil {
+ if err = keepClient.DiscoverKeepServers(); err != nil {
t.Fatalf("Error discovering keep services: %s", err)
}
keepServers = []string{}
}
func TearDownDataManagerTest(t *testing.T) {
- arvadostest.StopKeep()
+ arvadostest.StopKeep(2)
arvadostest.StopAPI()
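+ // Reset package-level output settings so they do not leak into later tests.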
+ summary.WriteDataTo = ""
+ collection.HeapProfileFilename = ""
}
func putBlock(t *testing.T, data string) string {
return match[1] + "+" + match[2]
}
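+// switchToken replaces arv.ApiToken with t and returns a func that restores
+// the original token; helpers call it as "defer switchToken(...)()" so the
+// token is restored when they return.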
+func switchToken(t string) func() {
+ orig := arv.ApiToken
+ restore := func() {
+ arv.ApiToken = orig
+ }
+ arv.ApiToken = t
+ return restore
+}
+
func getCollection(t *testing.T, uuid string) Dict {
+ defer switchToken(arvadostest.AdminToken)()
+
getback := make(Dict)
err := arv.Get("collections", uuid, nil, &getback)
if err != nil {
}
func updateCollection(t *testing.T, uuid string, paramName string, paramValue string) {
+ defer switchToken(arvadostest.AdminToken)()
+
err := arv.Update("collections", uuid, arvadosclient.Dict{
"collection": arvadosclient.Dict{
paramName: paramValue,
type Dict map[string]interface{}
func deleteCollection(t *testing.T, uuid string) {
+ defer switchToken(arvadostest.AdminToken)()
+
getback := make(Dict)
err := arv.Delete("collections", uuid, nil, &getback)
if err != nil {
path := keepServers[i] + "/index"
client := http.Client{}
req, err := http.NewRequest("GET", path, nil)
- req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+ req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
req.Header.Add("Content-Type", "application/octet-stream")
resp, err := client.Do(req)
defer resp.Body.Close()
return indexes
}
-func verifyBlocks(t *testing.T, notExpected []string, expected []string) {
+func verifyBlocks(t *testing.T, notExpected []string, expected []string, minReplication int) {
blocks := getBlockIndexes(t)
for _, block := range notExpected {
nFound++
}
}
- if nFound < 2 {
- t.Fatalf("Found %d replicas of block %s, expected >= 2", nFound, block)
+ if nFound < minReplication {
+ t.Fatalf("Found %d replicas of block %s, expected >= %d", nFound, block, minReplication)
}
}
}
return false
}
-/*
-Test env uses two keep volumes. The volume names can be found by reading the files
- ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
-
-The keep volumes are of the dir structure:
- volumeN/subdir/locator
-*/
+// Test env uses two keep volumes. The volume names can be found by reading the files
+// ARVADOS_HOME/tmp/keep0.volume and ARVADOS_HOME/tmp/keep1.volume
+//
+// The keep volumes are of the dir structure: volumeN/subdir/locator
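+// For example, a block whose hash starts with "abc" is assumed to live at a
+// path like keep0.volume/abc/abc123...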
func backdateBlocks(t *testing.T, oldUnusedBlockLocators []string) {
// First get rid of any size hints in the locators
var trimmedBlockLocators []string
func getStatus(t *testing.T, path string) interface{} {
client := http.Client{}
req, err := http.NewRequest("GET", path, nil)
- req.Header.Add("Authorization", "OAuth2 " + AdminToken)
+ req.Header.Add("Authorization", "OAuth2 "+arvadostest.DataManagerToken)
req.Header.Add("Content-Type", "application/octet-stream")
resp, err := client.Do(req)
if err != nil {
for _, ks := range keepServers {
for done := false; !done; {
time.Sleep(100 * time.Millisecond)
- s := getStatus(t, ks + "/status.json")
+ s := getStatus(t, ks+"/status.json")
for _, qName := range []string{"PullQueue", "TrashQueue"} {
qStatus := s.(map[string]interface{})[qName].(map[string]interface{})
- if qStatus["Queued"].(float64) + qStatus["InProgress"].(float64) == 0 {
+ if qStatus["Queued"].(float64)+qStatus["InProgress"].(float64) == 0 {
done = true
}
}
}
}
-/*
-Create some blocks and backdate some of them.
-Also create some collections and delete some of them.
-Verify block indexes.
-*/
+// Create some blocks and backdate some of them.
+// Also create some collections and delete some of them.
+// Verify block indexes.
func TestPutAndGetBlocks(t *testing.T) {
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
}
// Create a collection that would be deleted later on
- toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
- toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUuid)
+ toBeDeletedCollectionUUID := createCollection(t, "some data for collection creation")
+ toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUUID)
// Create another collection that has the same data as the one of the old blocks
- oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
- oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUuid)
+ oldUsedBlockCollectionUUID := createCollection(t, oldUsedBlockData)
+ oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUUID)
if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
}
// Create another collection whose replication level will be changed
- replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
- replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUuid)
+ replicationCollectionUUID := createCollection(t, "replication level on this collection will be reduced")
+ replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUUID)
// Create two collections with same data; one will be deleted later on
dataForTwoCollections := "one of these collections will be deleted"
- oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUuid)
- secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUuid)
+ oneOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+ oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUUID)
+ secondOfTwoWithSameDataUUID := createCollection(t, dataForTwoCollections)
+ secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUUID)
if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
}
+ // Create an empty block and a collection with empty data
+ emptyBlockLocator := putBlock(t, "")
+ emptyCollection := createCollection(t, "")
+
// Verify blocks before doing any backdating / deleting.
var expected []string
expected = append(expected, oldUnusedBlockLocators...)
expected = append(expected, replicationCollectionLocator)
expected = append(expected, oneOfTwoWithSameDataLocator)
expected = append(expected, secondOfTwoWithSameDataLocator)
+ expected = append(expected, emptyBlockLocator)
- verifyBlocks(t, nil, expected)
+ verifyBlocks(t, nil, expected, 2)
// Run datamanager in singlerun mode
dataManagerSingleRun(t)
waitUntilQueuesFinishWork(t)
- verifyBlocks(t, nil, expected)
+ verifyBlocks(t, nil, expected, 2)
// Backdate the to-be old blocks and delete the collections
backdateBlocks(t, oldUnusedBlockLocators)
- deleteCollection(t, toBeDeletedCollectionUuid)
- deleteCollection(t, secondOfTwoWithSameDataUuid)
+ deleteCollection(t, toBeDeletedCollectionUUID)
+ deleteCollection(t, secondOfTwoWithSameDataUUID)
+ backdateBlocks(t, []string{emptyBlockLocator})
+ deleteCollection(t, emptyCollection)
// Run data manager again
dataManagerSingleRun(t)
expected = append(expected, oldUsedBlockLocator)
expected = append(expected, newBlockLocators...)
expected = append(expected, toBeDeletedCollectionLocator)
- expected = append(expected, replicationCollectionLocator)
expected = append(expected, oneOfTwoWithSameDataLocator)
expected = append(expected, secondOfTwoWithSameDataLocator)
+ expected = append(expected, emptyBlockLocator) // the empty block is expected to remain even after its collection is deleted
- verifyBlocks(t, oldUnusedBlockLocators, expected)
-
- // Reduce replication on replicationCollectionUuid collection and verify that the overreplicated blocks are untouched.
+ verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
- // Default replication level is 2; first verify that the replicationCollectionLocator appears in both volumes
- for i := 0; i < len(keepServers); i++ {
- indexes := getBlockIndexesForServer(t, i)
- if !valueInArray(replicationCollectionLocator, indexes) {
- t.Fatalf("Not found block in index %s", replicationCollectionLocator)
- }
- }
+ // Reduce desired replication on replicationCollectionUUID
+ // collection, and verify that Data Manager does not reduce
+ // actual replication any further than that. (It might not
+ // reduce actual replication at all; that's OK for this test.)
- // Now reduce replication level on this collection and verify that it still appears in both volumes
- updateCollection(t, replicationCollectionUuid, "replication_desired", "1")
- collection := getCollection(t, replicationCollectionUuid)
+ // Reduce desired replication level.
+ updateCollection(t, replicationCollectionUUID, "replication_desired", "1")
+ collection := getCollection(t, replicationCollectionUUID)
if collection["replication_desired"].(interface{}) != float64(1) {
t.Fatalf("After update replication_desired is not 1; instead it is %v", collection["replication_desired"])
}
+ // Verify data is currently overreplicated.
+ verifyBlocks(t, nil, []string{replicationCollectionLocator}, 2)
+
// Run data manager again
dataManagerSingleRun(t)
waitUntilQueuesFinishWork(t)
- for i := 0; i < len(keepServers); i++ {
- indexes := getBlockIndexesForServer(t, i)
- if !valueInArray(replicationCollectionLocator, indexes) {
- t.Fatalf("Not found block in index %s", replicationCollectionLocator)
- }
- }
- // Done testing reduce replication on collection
+ // Verify data is not underreplicated.
+ verifyBlocks(t, nil, []string{replicationCollectionLocator}, 1)
- // Verify blocks one more time
- verifyBlocks(t, oldUnusedBlockLocators, expected)
+ // Verify *other* collections' data is not underreplicated.
+ verifyBlocks(t, oldUnusedBlockLocators, expected, 2)
}
func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
- arv.ApiToken = ActiveUserToken
+ arv.ApiToken = arvadostest.ActiveToken
err := singlerun(arv)
if err == nil {
t.Fatalf("Expected error during singlerun as non-admin user")
}
}
+
+func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
+ testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
+ testOldBlocksNotDeletedOnDataManagerError(t, "/badwritetofile", "", true, true)
+}
+
+func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
+ testOldBlocksNotDeletedOnDataManagerError(t, "", "/badheapprofilefile", true, true)
+}
+
+// Create some blocks and backdate them.
+// Run datamanager, optionally inducing an error condition.
+// Verify that the backdated blocks are deleted only when datamanager ran without error.
+func testOldBlocksNotDeletedOnDataManagerError(t *testing.T, writeDataTo string, heapProfileFile string, expectError bool, expectOldBlocks bool) {
+ defer TearDownDataManagerTest(t)
+ SetupDataManagerTest(t)
+
+ // Put some blocks and backdate them.
+ var oldUnusedBlockLocators []string
+ oldUnusedBlockData := "this block will have older mtime"
+ for i := 0; i < 5; i++ {
+ oldUnusedBlockLocators = append(oldUnusedBlockLocators, putBlock(t, fmt.Sprintf("%s%d", oldUnusedBlockData, i)))
+ }
+ backdateBlocks(t, oldUnusedBlockLocators)
+
+ // Run data manager
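+ // Point the summary dump and heap profile at the given paths; an unwritable
+ // path such as "/badwritetofile" is expected to make singlerun fail before
+ // any blocks are trashed.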
+ summary.WriteDataTo = writeDataTo
+ collection.HeapProfileFilename = heapProfileFile
+
+ err := singlerun(arv)
+ if !expectError {
+ if err != nil {
+ t.Fatalf("Got an error during datamanager singlerun: %v", err)
+ }
+ } else {
+ if err == nil {
+ t.Fatalf("Expected error during datamanager singlerun")
+ }
+ }
+ waitUntilQueuesFinishWork(t)
+
+ // Get block indexes and verify that the backdated blocks were kept or deleted as expected
+ if expectOldBlocks {
+ verifyBlocks(t, nil, oldUnusedBlockLocators, 2)
+ } else {
+ verifyBlocks(t, oldUnusedBlockLocators, nil, 2)
+ }
+}
+
+// Create a collection with multiple streams and blocks
+func createMultiStreamBlockCollection(t *testing.T, data string, numStreams, numBlocks int) (string, []string) {
+ defer switchToken(arvadostest.AdminToken)()
+
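+ // Build a manifest with numStreams streams, each listing numBlocks block
+ // locators followed by a single file token, e.g.:
+ //   ./stream0 <locator> <locator> 0:1:dummyfile.txt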
+ manifest := ""
+ locators := make(map[string]bool)
+ for s := 0; s < numStreams; s++ {
+ manifest += fmt.Sprintf("./stream%d ", s)
+ for b := 0; b < numBlocks; b++ {
+ locator, _, err := keepClient.PutB([]byte(fmt.Sprintf("%s in stream %d and block %d", data, s, b)))
+ if err != nil {
+ t.Fatalf("Error creating block %d in stream %d: %v", b, s, err)
+ }
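+ // Record the locator without its +A permission hint, matching what the
+ // keepstore index reports.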
+ locators[strings.Split(locator, "+A")[0]] = true
+ manifest += locator + " "
+ }
+ manifest += "0:1:dummyfile.txt\n"
+ }
+
+ collection := make(Dict)
+ err := arv.Create("collections",
+ arvadosclient.Dict{"collection": arvadosclient.Dict{"manifest_text": manifest}},
+ &collection)
+
+ if err != nil {
+ t.Fatalf("Error creating collection %v", err)
+ }
+
+ var locs []string
+ for k := range locators {
+ locs = append(locs, k)
+ }
+
+ return collection["uuid"].(string), locs
+}
+
+// Create a collection with multiple streams and blocks; backdate the blocks but do not delete the collection.
+// Also create a stray block and backdate it.
+// After the datamanager run, expect the collection's blocks to remain, but not the stray block.
+func TestManifestWithMultipleStreamsAndBlocks(t *testing.T) {
+ testManifestWithMultipleStreamsAndBlocks(t, 100, 10, "", false)
+}
+
+// Same test as TestManifestWithMultipleStreamsAndBlocks, with an additional
+// keep service whose type is not "disk". Datamanager indexes only "disk" type
+// services, so the outcome should be the same.
+func TestManifestWithMultipleStreamsAndBlocks_WithOneUnsupportedKeepServer(t *testing.T) {
+ testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "testblobstore", false)
+}
+
+// Test datamanager with dry-run. Expect no block to be deleted.
+func TestManifestWithMultipleStreamsAndBlocks_DryRun(t *testing.T) {
+ testManifestWithMultipleStreamsAndBlocks(t, 2, 2, "", true)
+}
+
+func testManifestWithMultipleStreamsAndBlocks(t *testing.T, numStreams, numBlocks int, createExtraKeepServerWithType string, isDryRun bool) {
+ defer TearDownDataManagerTest(t)
+ SetupDataManagerTest(t)
+
+ // create collection whose blocks will be backdated
+ collectionWithOldBlocks, oldBlocks := createMultiStreamBlockCollection(t, "old block", numStreams, numBlocks)
+ if collectionWithOldBlocks == "" {
+ t.Fatalf("Failed to create collection with %d blocks", numStreams*numBlocks)
+ }
+ if len(oldBlocks) != numStreams*numBlocks {
+ t.Fatalf("Not all blocks are created: expected %v, found %v", 1000, len(oldBlocks))
+ }
+
+ // create a stray block that will be backdated
+ strayOldBlock := putBlock(t, "this stray block is old")
+
+ expected := []string{strayOldBlock}
+ expected = append(expected, oldBlocks...)
+ verifyBlocks(t, nil, expected, 2)
+
+ // Backdate the old blocks; the collection still references them
+ backdateBlocks(t, oldBlocks)
+
+ // also backdate the stray old block
+ backdateBlocks(t, []string{strayOldBlock})
+
+ // If requested, create an extra keepserver with the given type
+ // This should be ignored during indexing and hence not change the datamanager outcome
+ var extraKeepServerUUID string
+ if createExtraKeepServerWithType != "" {
+ extraKeepServerUUID = addExtraKeepServer(t, createExtraKeepServerWithType)
+ defer deleteExtraKeepServer(extraKeepServerUUID)
+ }
+
+ // run datamanager
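+ // dryRun is assumed to be the package-level flag behind datamanager's
+ // -dry-run option; when true, trash lists are computed but nothing is deleted.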
+ dryRun = isDryRun
+ dataManagerSingleRun(t)
+
+ if dryRun {
+ // verify that all blocks, including strayOldBlock, are still to be found
+ verifyBlocks(t, nil, expected, 2)
+ } else {
+ // verify that strayOldBlock is not to be found, but the collection's blocks are still there
+ verifyBlocks(t, []string{strayOldBlock}, oldBlocks, 2)
+ }
+}
+
+// Add one more keepstore with the given service type
+func addExtraKeepServer(t *testing.T, serviceType string) string {
+ defer switchToken(arvadostest.AdminToken)()
+
+ extraKeepService := make(arvadosclient.Dict)
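+ // Only register the keep_services record; nothing listens on this port. A
+ // non-"disk" service type is expected to be ignored by datamanager indexing.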
+ err := arv.Create("keep_services",
+ arvadosclient.Dict{"keep_service": arvadosclient.Dict{
+ "service_host": "localhost",
+ "service_port": "21321",
+ "service_ssl_flag": false,
+ "service_type": serviceType}},
+ &extraKeepService)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return extraKeepService["uuid"].(string)
+}
+
+func deleteExtraKeepServer(uuid string) {
+ defer switchToken(arvadostest.AdminToken)()
+ arv.Delete("keep_services", uuid, nil, nil)
+}