// locatorMatcher matches a Keep block locator: 32 lowercase hex digits
// (presumably an md5 checksum), "+", a decimal size, and any trailing
// suffix — captured as groups 1, 2, and 3 respectively.
var locatorMatcher = regexp.MustCompile(`^([0-9a-f]{32})\+(\d*)(.*)$`)
-func getCollectionLocator(t *testing.T, uuid string) string {
+func getFirstLocatorFromCollection(t *testing.T, uuid string) string {
manifest := getCollection(t, uuid)["manifest_text"].(string)
locator := strings.Split(manifest, " ")[1]
}
for _, block := range expected {
- if blockExists[block] == nil || len(blockExists[block]) != 2 {
+ if blockExists[block] == nil || len(blockExists[block]) < 2 {
t.Fatalf("Expected to find two replicas for block %s; found %d", block, len(blockExists[block]))
}
}
// Create a collection that would be deleted later on
toBeDeletedCollectionUuid := createCollection(t, "some data for collection creation")
- toBeDeletedCollectionLocator := getCollectionLocator(t, toBeDeletedCollectionUuid)
+ toBeDeletedCollectionLocator := getFirstLocatorFromCollection(t, toBeDeletedCollectionUuid)
// Create another collection that has the same data as the one of the old blocks
oldUsedBlockCollectionUuid := createCollection(t, oldUsedBlockData)
- oldUsedBlockCollectionLocator := getCollectionLocator(t, oldUsedBlockCollectionUuid)
+ oldUsedBlockCollectionLocator := getFirstLocatorFromCollection(t, oldUsedBlockCollectionUuid)
if oldUsedBlockCollectionLocator != oldUsedBlockLocator {
t.Fatalf("Locator of the collection with the same data as old block is different %s", oldUsedBlockCollectionLocator)
}
// Create another collection whose replication level will be changed
replicationCollectionUuid := createCollection(t, "replication level on this collection will be reduced")
- replicationCollectionLocator := getCollectionLocator(t, replicationCollectionUuid)
+ replicationCollectionLocator := getFirstLocatorFromCollection(t, replicationCollectionUuid)
// Create two collections with same data; one will be deleted later on
dataForTwoCollections := "one of these collections will be deleted"
oneOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- oneOfTwoWithSameDataLocator := getCollectionLocator(t, oneOfTwoWithSameDataUuid)
+ oneOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, oneOfTwoWithSameDataUuid)
secondOfTwoWithSameDataUuid := createCollection(t, dataForTwoCollections)
- secondOfTwoWithSameDataLocator := getCollectionLocator(t, secondOfTwoWithSameDataUuid)
+ secondOfTwoWithSameDataLocator := getFirstLocatorFromCollection(t, secondOfTwoWithSameDataUuid)
if oneOfTwoWithSameDataLocator != secondOfTwoWithSameDataLocator {
t.Fatalf("Locators for both these collections expected to be same: %s %s", oneOfTwoWithSameDataLocator, secondOfTwoWithSameDataLocator)
}
verifyBlocks(t, oldUnusedBlockLocators, expected)
}
-func _TestDatamanagerSingleRunRepeatedly(t *testing.T) {
+func TestDatamanagerSingleRunRepeatedly(t *testing.T) {
log.Print("TestDatamanagerSingleRunRepeatedly start")
defer TearDownDataManagerTest(t)
}
}
-func _TestGetStatusRepeatedly(t *testing.T) {
- log.Print("TestGetStatusRepeatedly start")
+func TestGetStatusRepeatedly(t *testing.T) {
+ t.Skip("This test still fails. Skip it until it is fixed.")
defer TearDownDataManagerTest(t)
SetupDataManagerTest(t)
// MIN_FREE_KILOBYTES is the minimum free space, in kilobytes (one
// block's worth), that must be available in order to permit writes.
const MIN_FREE_KILOBYTES = BLOCKSIZE / 1024
// TEST_DATA_MANAGER_TOKEN is the well-known data manager token used in
// testing. Until #6221 is resolved, never_delete must be true in real
// deployments; never_delete may be false only when the configured data
// manager token equals this test token.
const TEST_DATA_MANAGER_TOKEN = "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h"

// PROC_MOUNTS is the path of the mount table to read; a var rather
// than a const, presumably so tests can substitute a fixture file —
// TODO confirm against test code.
var PROC_MOUNTS = "/proc/mounts"
// enforce_permissions controls whether permission signatures
log.Fatalf("reading data manager token: %s\n", err)
}
}
+
+ if never_delete != true && data_manager_token != TEST_DATA_MANAGER_TOKEN {
+ log.Fatal("never_delete must be true, see #6221")
+ }
+
if blob_signing_key_file != "" {
if buf, err := ioutil.ReadFile(blob_signing_key_file); err == nil {
PermissionSecret = bytes.TrimSpace(buf)