X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/e99f026d040c6020dfcc51c6d988cf18d325a530..da83807d6bcef1c1f0bb78479c5ec17f150f5eda:/services/keep-balance/balance_test.go

diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
index c529ac150e..cb61ea58cc 100644
--- a/services/keep-balance/balance_test.go
+++ b/services/keep-balance/balance_test.go
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package main
+package keepbalance
 
 import (
 	"crypto/md5"
@@ -87,6 +87,8 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
 			KeepMount: arvados.KeepMount{
 				UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
 				StorageClasses: map[string]bool{"default": true},
+				AllowWrite: true,
+				AllowTrash: true,
 			},
 			KeepService: srv,
 		}}
@@ -167,9 +169,10 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
 		srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
 		srv.mounts = append(srv.mounts, &KeepMount{
 			KeepMount: arvados.KeepMount{
-				DeviceID: fmt.Sprintf("writable-by-srv-%x", (i+1)%len(bal.srvs)),
-				UUID: fmt.Sprintf("zzzzz-mount-%015x", i<<16),
-				ReadOnly: readonly,
+				DeviceID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
+				UUID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
+				AllowWrite: !readonly,
+				AllowTrash: !readonly,
 				Replication: 1,
 				StorageClasses: map[string]bool{"default": true},
 			},
@@ -321,6 +324,35 @@ func (bal *balancerSuite) TestDecreaseReplTimestampCollision(c *check.C) {
 		desired: map[string]int{"default": 2},
 		current: slots{0, 1, 2},
 		timestamps: []int64{12345678, 10000000, 10000000}})
+	bal.try(c, tester{
+		desired: map[string]int{"default": 0},
+		current: slots{0, 1, 2},
+		timestamps: []int64{12345678, 12345678, 12345678},
+		shouldTrash: slots{0},
+		shouldTrashMounts: []string{
+			bal.srvs[bal.knownRendezvous[0][0]].mounts[0].UUID}})
+	bal.try(c, tester{
+		desired: map[string]int{"default": 2},
+		current: slots{0, 1, 2, 5, 6},
+		timestamps: []int64{12345678, 12345679, 10000000, 10000000, 10000000},
+		shouldTrash: slots{2},
+		shouldTrashMounts: []string{
+			bal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID}})
+	bal.try(c, tester{
+		desired: map[string]int{"default": 2},
+		current: slots{0, 1, 2, 5, 6},
+		timestamps: []int64{12345678, 12345679, 12345671, 10000000, 10000000},
+		shouldTrash: slots{2, 5},
+		shouldTrashMounts: []string{
+			bal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID,
+			bal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})
+	bal.try(c, tester{
+		desired: map[string]int{"default": 2},
+		current: slots{0, 1, 2, 5, 6},
+		timestamps: []int64{12345678, 12345679, 12345679, 10000000, 10000000},
+		shouldTrash: slots{5},
+		shouldTrashMounts: []string{
+			bal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})
 }
 
 func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
@@ -345,8 +377,9 @@
 }
 
 func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
-	bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+	bal.srvs[3].mounts[0].KeepMount.AllowWrite = false
 	bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
+	bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
 	bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
 	c.Check(len(bal.srvs[3].mounts), check.Equals, 1)
 	bal.cleanupMounts()
@@ -485,32 +518,32 @@ func (bal *balancerSuite) TestVolumeReplication(c *check.C) {
 }
 
 func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
-	bal.srvs[0].mounts[0].KeepMount.DeviceID = "abcdef"
-	bal.srvs[9].mounts[0].KeepMount.DeviceID = "abcdef"
-	bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+	dupUUID := bal.srvs[0].mounts[0].KeepMount.UUID
+	bal.srvs[9].mounts[0].KeepMount.UUID = dupUUID
+	bal.srvs[14].mounts[0].KeepMount.UUID = dupUUID
 	// block 0 belongs on servers 3 and e, which have different
-	// device IDs.
+	// UUIDs.
 	bal.try(c, tester{
 		known: 0,
 		desired: map[string]int{"default": 2},
 		current: slots{1},
 		shouldPull: slots{0}})
 	// block 1 belongs on servers 0 and 9, which both report
-	// having a replica, but the replicas are on the same device
-	// ID -- so we should pull to the third position (7).
+	// having a replica, but the replicas are on the same volume
+	// -- so we should pull to the third position (7).
 	bal.try(c, tester{
 		known: 1,
 		desired: map[string]int{"default": 2},
 		current: slots{0, 1},
 		shouldPull: slots{2}})
-	// block 1 can be pulled to the doubly-mounted device, but the
+	// block 1 can be pulled to the doubly-mounted volume, but the
 	// pull should only be done on the first of the two servers.
 	bal.try(c, tester{
 		known: 1,
 		desired: map[string]int{"default": 2},
 		current: slots{2},
 		shouldPull: slots{0}})
-	// block 0 has one replica on a single device mounted on two
+	// block 0 has one replica on a single volume mounted on two
 	// servers (e,9 at positions 1,9). Trashing the replica on 9
 	// would lose the block.
 	bal.try(c, tester{
@@ -523,7 +556,7 @@ func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
 			pulling: 1,
 		}})
 	// block 0 is overreplicated, but the second and third
-	// replicas are the same replica according to DeviceID
+	// replicas are the same replica according to volume UUID
 	// (despite different Mtimes). Don't trash the third replica.
 	bal.try(c, tester{
 		known: 0,
@@ -553,6 +586,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
 	// classes=[special,special2].
 	bal.srvs[9].mounts = []*KeepMount{{
 		KeepMount: arvados.KeepMount{
+			AllowWrite: true,
+			AllowTrash: true,
 			Replication: 1,
 			StorageClasses: map[string]bool{"special": true},
 			UUID: "zzzzz-mount-special00000009",
@@ -561,6 +596,8 @@
 		KeepService: bal.srvs[9],
 	}, {
 		KeepMount: arvados.KeepMount{
+			AllowWrite: true,
+			AllowTrash: true,
 			Replication: 1,
 			StorageClasses: map[string]bool{"special": true, "special2": true},
 			UUID: "zzzzz-mount-special20000009",
@@ -573,6 +610,8 @@
 	// classes=[special3], one with classes=[default].
 	bal.srvs[13].mounts = []*KeepMount{{
 		KeepMount: arvados.KeepMount{
+			AllowWrite: true,
+			AllowTrash: true,
 			Replication: 1,
 			StorageClasses: map[string]bool{"special2": true},
 			UUID: "zzzzz-mount-special2000000d",
@@ -581,6 +620,8 @@
 		KeepService: bal.srvs[13],
 	}, {
 		KeepMount: arvados.KeepMount{
+			AllowWrite: true,
+			AllowTrash: true,
 			Replication: 1,
 			StorageClasses: map[string]bool{"default": true},
 			UUID: "zzzzz-mount-00000000000000d",
@@ -595,7 +636,7 @@
 		desired: map[string]int{"default": 2, "special": 1},
 		current: slots{0, 1},
 		shouldPull: slots{9},
-		shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+		shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
 	// If some storage classes are not satisfied, don't trash any
 	// excess replicas. (E.g., if someone desires repl=1 on
 	// class=durable, and we have two copies on class=volatile, we
@@ -605,7 +646,7 @@
 		desired: map[string]int{"special": 1},
 		current: slots{0, 1},
 		shouldPull: slots{9},
-		shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+		shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
 	// Once storage classes are satisfied, trash excess replicas
 	// that appear earlier in probe order but aren't needed to
 	// satisfy the desired classes.
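
Not part of the patch above: a minimal sketch of the mount shape the updated tests rely on, for readers unfamiliar with the fixture. The arvados.KeepMount field names (UUID, AllowWrite, AllowTrash, Replication, StorageClasses) and the KeepMount/KeepService wrapper are taken from the hunks above; the surrounding variables (srv, i) mirror the SetUpTest fixture and are illustrative only.

	// Sketch only: a mount that keep-balance may both write to and trash
	// from. AllowWrite/AllowTrash replace the single ReadOnly flag, so the
	// old ReadOnly: false corresponds to both permissions being true here.
	srv.mounts = []*KeepMount{{
		KeepMount: arvados.KeepMount{
			UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
			AllowWrite: true,
			AllowTrash: true,
			Replication: 1,
			StorageClasses: map[string]bool{"default": true},
		},
		KeepService: srv,
	}}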