+ repl := cfgvol.Replication
+ if repl < 1 {
+ repl = 1
+ }
+ mnt := &VolumeMount{
+ KeepMount: arvados.KeepMount{
+ UUID: uuid,
+ DeviceID: vol.GetDeviceID(),
+ ReadOnly: cfgvol.ReadOnly || va.ReadOnly,
+ Replication: repl,
+ StorageClasses: sc,
+ },
+ Volume: vol,
+ }
+ vm.iostats[vol] = &ioStats{}
+ vm.mounts = append(vm.mounts, mnt)
+ vm.mountMap[uuid] = mnt
+ vm.readables = append(vm.readables, mnt)
+ if !mnt.KeepMount.ReadOnly {
+ vm.writables = append(vm.writables, mnt)
+ }
+ }
+ // pri(mnt): return highest priority of any storage class
+ // offered by mnt
+ pri := func(mnt *VolumeMount) int {
+ any, best := false, 0
+ for class := range mnt.KeepMount.StorageClasses {
+ if p := cluster.StorageClasses[class].Priority; !any || best < p {
+ best = p
+ any = true
+ }
+ }
+ return best
+ }
+ // less(a,b): sort first by highest priority of any offered
+ // storage class (highest->lowest), then by volume UUID
+ less := func(a, b *VolumeMount) bool {
+ if pa, pb := pri(a), pri(b); pa != pb {
+ return pa > pb
+ } else {
+ return a.KeepMount.UUID < b.KeepMount.UUID
+ }
+ }
+ sort.Slice(vm.readables, func(i, j int) bool {
+ return less(vm.readables[i], vm.readables[j])
+ })
+ sort.Slice(vm.writables, func(i, j int) bool {
+ return less(vm.writables[i], vm.writables[j])
+ })
+ sort.Slice(vm.mounts, func(i, j int) bool {
+ return less(vm.mounts[i], vm.mounts[j])
+ })
+ return vm, nil
+}
+
+// Mounts returns every configured volume mount (readable and
+// writable alike), sorted by descending storage-class priority and
+// then by volume UUID, as established at construction time.
+func (vm *RRVolumeManager) Mounts() []*VolumeMount {
+	return vm.mounts
+}
+
+func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
+ if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
+ return mnt