"bytes"
"context"
"crypto/md5"
+ "errors"
"fmt"
"io"
"io/ioutil"
rwdev := map[string]*KeepService{}
for _, srv := range bal.KeepServices {
for _, mnt := range srv.mounts {
- if !mnt.ReadOnly && mnt.DeviceID != "" {
- rwdev[mnt.DeviceID] = srv
+ if !mnt.ReadOnly {
+ rwdev[mnt.UUID] = srv
}
}
}
for _, srv := range bal.KeepServices {
var dedup []*KeepMount
for _, mnt := range srv.mounts {
- if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
- bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
+ if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+ bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
} else {
dedup = append(dedup, mnt)
}
}
}
+ mountProblem := false
+ type deviceMount struct {
+ srv *KeepService
+ mnt *KeepMount
+ }
+ deviceMounted := map[string]deviceMount{} // DeviceID -> mount
+ for _, srv := range bal.KeepServices {
+ for _, mnt := range srv.mounts {
+ if first, dup := deviceMounted[mnt.DeviceID]; dup && first.mnt.UUID != mnt.UUID && mnt.DeviceID != "" {
+ bal.logf("config error: device %s is mounted with multiple volume UUIDs: %s on %s, and %s on %s",
+ mnt.DeviceID,
+ first.mnt.UUID, first.srv,
+ mnt.UUID, srv)
+ mountProblem = true
+ continue
+ }
+ deviceMounted[mnt.DeviceID] = deviceMount{srv, mnt}
+ }
+ }
+ if mountProblem {
+ return errors.New("cannot continue with config errors (see above)")
+ }
+
var checkPage arvados.CollectionList
if err = c.RequestAndDecode(&checkPage, "GET", "arvados/v1/collections", nil, arvados.ResourceListParams{
Limit: new(int),
deviceMount := map[string]*KeepMount{}
for _, srv := range bal.KeepServices {
for _, mnt := range srv.mounts {
- equiv := deviceMount[mnt.DeviceID]
+ equiv := deviceMount[mnt.UUID]
if equiv == nil {
equiv = mnt
- if mnt.DeviceID != "" {
- deviceMount[mnt.DeviceID] = equiv
- }
+ deviceMount[mnt.UUID] = equiv
}
equivMount[equiv] = append(equivMount[equiv], mnt)
}
// new/remaining replicas uniformly
// across qualifying mounts on a given
// server.
- return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
+ return rendezvousLess(si.mnt.UUID, sj.mnt.UUID, blkid)
}
})
// and returns true if all requirements are met.
trySlot := func(i int) bool {
slot := slots[i]
- if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
+ if wantMnt[slot.mnt] || wantDev[slot.mnt.UUID] {
// Already allocated a replica to this
// backend device, possibly on a
// different server.
slots[i].want = true
wantSrv[slot.mnt.KeepService] = true
wantMnt[slot.mnt] = true
- if slot.mnt.DeviceID != "" {
- wantDev[slot.mnt.DeviceID] = true
- }
+ wantDev[slot.mnt.UUID] = true
replWant += slot.mnt.Replication
}
return replProt >= desired && replWant >= desired
// haven't already been added to unsafeToDelete
// because the servers report different Mtimes.
for _, slot := range slots {
- if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
+ if slot.repl != nil && wantDev[slot.mnt.UUID] {
unsafeToDelete[slot.repl.Mtime] = true
}
}
if onlyCount != nil && !onlyCount[slot.mnt] {
continue
}
- if countedDev[slot.mnt.DeviceID] {
+ if countedDev[slot.mnt.UUID] {
continue
}
switch {
bbs.pulling++
repl += slot.mnt.Replication
}
- if slot.mnt.DeviceID != "" {
- countedDev[slot.mnt.DeviceID] = true
- }
+ countedDev[slot.mnt.UUID] = true
}
if repl < needRepl {
bbs.unachievable = true
c.Check(pullReqs.Count(), check.Equals, 0)
}
+// TestRefuseSameDeviceDifferentVolumes checks that a balancing run aborts
+// with a config error — and commits no trash or pull requests — when two
+// keepstore servers report mounts that share a DeviceID but have different
+// volume UUIDs.
+func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
+ opts := RunOptions{
+ CommitPulls: true,
+ CommitTrash: true,
+ Logger: ctxlog.TestLogger(c),
+ }
+ s.stub.serveCurrentUserAdmin()
+ s.stub.serveZeroCollections()
+ s.stub.serveKeepServices(stubServices)
+ // Every stubbed server reports one mount whose UUID is derived from its
+ // own hostname, but whose DeviceID is the same ("keep0-vol0") on all
+ // servers — i.e., one device apparently mounted as different volumes.
+ s.stub.mux.HandleFunc("/mounts", func(w http.ResponseWriter, r *http.Request) {
+ hostid := r.Host[:5] // "keep0.zzzzz.arvadosapi.com:25107" => "keep0"
+ json.NewEncoder(w).Encode([]arvados.KeepMount{{
+ UUID: "zzzzz-ivpuk-0000000000" + hostid,
+ DeviceID: "keep0-vol0",
+ StorageClasses: map[string]bool{"default": true},
+ }})
+ })
+ trashReqs := s.stub.serveKeepstoreTrash()
+ pullReqs := s.stub.serveKeepstorePull()
+ srv := s.newServer(&opts)
+ _, err := srv.runOnce()
+ // The run must fail with the config-error message, and must not have
+ // sent any trash or pull lists to the keepstores.
+ c.Check(err, check.ErrorMatches, "cannot continue with config errors.*")
+ c.Check(trashReqs.Count(), check.Equals, 0)
+ c.Check(pullReqs.Count(), check.Equals, 0)
+}
+
func (s *runSuite) TestWriteLostBlocks(c *check.C) {
lostf, err := ioutil.TempFile("", "keep-balance-lost-blocks-test-")
c.Assert(err, check.IsNil)
srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
srv.mounts = append(srv.mounts, &KeepMount{
KeepMount: arvados.KeepMount{
- DeviceID: fmt.Sprintf("writable-by-srv-%x", (i+1)%len(bal.srvs)),
- UUID: fmt.Sprintf("zzzzz-mount-%015x", i<<16),
+ DeviceID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
+ UUID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
ReadOnly: readonly,
Replication: 1,
StorageClasses: map[string]bool{"default": true},
func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
+ bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
c.Check(len(bal.srvs[3].mounts), check.Equals, 1)
bal.cleanupMounts()
}
func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
- bal.srvs[0].mounts[0].KeepMount.DeviceID = "abcdef"
- bal.srvs[9].mounts[0].KeepMount.DeviceID = "abcdef"
- bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+ dupUUID := bal.srvs[0].mounts[0].KeepMount.UUID
+ bal.srvs[9].mounts[0].KeepMount.UUID = dupUUID
+ bal.srvs[14].mounts[0].KeepMount.UUID = dupUUID
// block 0 belongs on servers 3 and e, which have different
- // device IDs.
+ // UUIDs.
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 2},
current: slots{1},
shouldPull: slots{0}})
// block 1 belongs on servers 0 and 9, which both report
- // having a replica, but the replicas are on the same device
- // ID -- so we should pull to the third position (7).
+ // having a replica, but the replicas are on the same volume
+ // -- so we should pull to the third position (7).
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 2},
current: slots{0, 1},
shouldPull: slots{2}})
- // block 1 can be pulled to the doubly-mounted device, but the
+ // block 1 can be pulled to the doubly-mounted volume, but the
// pull should only be done on the first of the two servers.
bal.try(c, tester{
known: 1,
desired: map[string]int{"default": 2},
current: slots{2},
shouldPull: slots{0}})
- // block 0 has one replica on a single device mounted on two
+ // block 0 has one replica on a single volume mounted on two
// servers (e,9 at positions 1,9). Trashing the replica on 9
// would lose the block.
bal.try(c, tester{
pulling: 1,
}})
// block 0 is overreplicated, but the second and third
- // replicas are the same replica according to DeviceID
+ // replicas are the same replica according to volume UUID
// (despite different Mtimes). Don't trash the third replica.
bal.try(c, tester{
known: 0,
desired: map[string]int{"default": 2, "special": 1},
current: slots{0, 1},
shouldPull: slots{9},
- shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+ shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
// If some storage classes are not satisfied, don't trash any
// excess replicas. (E.g., if someone desires repl=1 on
// class=durable, and we have two copies on class=volatile, we
desired: map[string]int{"special": 1},
current: slots{0, 1},
shouldPull: slots{9},
- shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+ shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
// Once storage classes are satisfied, trash excess replicas
// that appear earlier in probe order but aren't needed to
// satisfy the desired classes.