Split KeepMount's ReadOnly flag into AllowWrite and AllowTrash flags.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>
ReadOnly: false
"http://host1.example:25107": {}
ReadOnly: false
+ AllowTrashWhenReadOnly: false
Replication: 1
StorageClasses:
# If you have configured storage classes (see StorageClasses
}
type Volume struct {
- AccessViaHosts map[URL]VolumeAccess
- ReadOnly bool
- Replication int
- StorageClasses map[string]bool
- Driver string
- DriverParameters json.RawMessage
+ AccessViaHosts map[URL]VolumeAccess
+ ReadOnly bool
+ AllowTrashWhenReadOnly bool
+ Replication int
+ StorageClasses map[string]bool
+ Driver string
+ DriverParameters json.RawMessage
}
type S3VolumeDriverParameters struct {
// KeepMount describes a storage volume as mounted on a keep service.
// AllowWrite/AllowTrash replace the former ReadOnly flag: a mount may
// allow trashing existing blocks even when new writes are disallowed.
type KeepMount struct {
	UUID           string          `json:"uuid"`
	DeviceID       string          `json:"device_id"`
	AllowWrite     bool            `json:"allow_write"` // new data may be written to this mount
	AllowTrash     bool            `json:"allow_trash"` // blocks on this mount may be trashed
	Replication    int             `json:"replication"`
	StorageClasses map[string]bool `json:"storage_classes"`
}
rwdev := map[string]*KeepService{}
for _, srv := range bal.KeepServices {
for _, mnt := range srv.mounts {
- if !mnt.ReadOnly {
+ if mnt.AllowWrite {
rwdev[mnt.UUID] = srv
}
}
for _, srv := range bal.KeepServices {
var dedup []*KeepMount
for _, mnt := range srv.mounts {
- if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+ if !mnt.AllowWrite && rwdev[mnt.UUID] != nil {
bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
} else {
dedup = append(dedup, mnt)
for _, mnt := range srv.mounts {
bal.mounts++
- // All mounts on a read-only service are
- // effectively read-only.
- mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+ if srv.ReadOnly {
+ // All mounts on a read-only service
+ // are effectively read-only.
+ mnt.AllowWrite = false
+ }
for class := range mnt.StorageClasses {
if mbc := bal.mountsByClass[class]; mbc == nil {
slots = append(slots, slot{
mnt: mnt,
repl: repl,
- want: repl != nil && mnt.ReadOnly,
+ want: repl != nil && !mnt.AllowTrash,
})
}
}
protMnt[slot.mnt] = true
replProt += slot.mnt.Replication
}
- if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+ if replWant < desired && (slot.repl != nil || slot.mnt.AllowWrite) {
slots[i].want = true
wantSrv[slot.mnt.KeepService] = true
wantMnt[slot.mnt] = true
case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
lost = true
change = changeNone
- case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+ case slot.repl == nil && slot.want && slot.mnt.AllowWrite:
slot.mnt.KeepService.AddPull(Pull{
SizedDigest: blkid,
From: blk.Replicas[0].KeepMount.KeepService,
UUID: "zzzzz-ivpuk-000000000000000",
DeviceID: "keep0-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep1.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-100000000000000",
DeviceID: "keep1-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep2.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-200000000000000",
DeviceID: "keep2-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
"keep3.zzzzz.arvadosapi.com:25107": {{
UUID: "zzzzz-ivpuk-300000000000000",
DeviceID: "keep3-vol0",
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
}},
}
KeepMount: arvados.KeepMount{
UUID: fmt.Sprintf("zzzzz-mount-%015x", i),
StorageClasses: map[string]bool{"default": true},
+ AllowWrite: true,
+ AllowTrash: true,
},
KeepService: srv,
}}
KeepMount: arvados.KeepMount{
DeviceID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
UUID: bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
- ReadOnly: readonly,
+ AllowWrite: !readonly,
+ AllowTrash: !readonly,
Replication: 1,
StorageClasses: map[string]bool{"default": true},
},
}
func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
- bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+ bal.srvs[3].mounts[0].KeepMount.AllowWrite = false
bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
// classes=[special,special2].
bal.srvs[9].mounts = []*KeepMount{{
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special": true},
UUID: "zzzzz-mount-special00000009",
KeepService: bal.srvs[9],
}, {
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special": true, "special2": true},
UUID: "zzzzz-mount-special20000009",
// classes=[special3], one with classes=[default].
bal.srvs[13].mounts = []*KeepMount{{
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"special2": true},
UUID: "zzzzz-mount-special2000000d",
KeepService: bal.srvs[13],
}, {
KeepMount: arvados.KeepMount{
+ AllowWrite: true,
+ AllowTrash: true,
Replication: 1,
StorageClasses: map[string]bool{"default": true},
UUID: "zzzzz-mount-00000000000000d",
// Trash a Keep block.
func (v *AzureBlobVolume) Trash(loc string) error {
- if v.volume.ReadOnly {
+ if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
return MethodDisabledError
}
-
// Ideally we would use If-Unmodified-Since, but that
// particular condition seems to be ignored by Azure. Instead,
// we get the Etag before checking Mtime, and use If-Match to
if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 &&
h.Cluster.Collections.BlobTrash &&
h.Cluster.Collections.BlobDeleteConcurrency > 0 {
- go emptyTrash(h.volmgr.writables, d)
+ go emptyTrash(h.volmgr.mounts, d)
}
return nil
Deleted int `json:"copies_deleted"`
Failed int `json:"copies_failed"`
}
- for _, vol := range rtr.volmgr.AllWritable() {
- if err := vol.Trash(hash); err == nil {
+ for _, vol := range rtr.volmgr.Mounts() {
+ if !vol.KeepMount.AllowTrash {
+ continue
+ } else if err := vol.Trash(hash); err == nil {
result.Deleted++
} else if os.IsNotExist(err) {
continue
func emptyTrash(mounts []*VolumeMount, interval time.Duration) {
for range time.NewTicker(interval).C {
for _, v := range mounts {
- v.EmptyTrash()
+ if v.KeepMount.AllowTrash {
+ v.EmptyTrash()
+ }
}
}
}
// Trash a Keep block.
func (v *S3AWSVolume) Trash(loc string) error {
- if v.volume.ReadOnly {
+ if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
return MethodDisabledError
}
if t, err := v.Mtime(loc); err != nil {
var volumes []*VolumeMount
if uuid := trashRequest.MountUUID; uuid == "" {
- volumes = volmgr.AllWritable()
- } else if mnt := volmgr.Lookup(uuid, true); mnt == nil {
+ volumes = volmgr.Mounts()
+ } else if mnt := volmgr.Lookup(uuid, false); mnt == nil {
logger.Warnf("trash request for nonexistent mount: %v", trashRequest)
return
+ } else if !mnt.KeepMount.AllowTrash {
+ logger.Warnf("trash request for mount with ReadOnly=true, AllowTrashWhenReadOnly=false: %v", trashRequest)
} else {
volumes = []*VolumeMount{mnt}
}
// be re-written), or (b) Touch() will update the file's timestamp and
// Trash() will read the correct up-to-date timestamp and choose not to
// trash the file.
-
- if v.volume.ReadOnly || !v.cluster.Collections.BlobTrash {
+ if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
return MethodDisabledError
}
if err := v.lock(context.TODO()); err != nil {
if err != nil {
return nil, fmt.Errorf("error initializing volume %s: %s", uuid, err)
}
- logger.Printf("started volume %s (%s), ReadOnly=%v", uuid, vol, cfgvol.ReadOnly || va.ReadOnly)
-
sc := cfgvol.StorageClasses
if len(sc) == 0 {
sc = map[string]bool{"default": true}
KeepMount: arvados.KeepMount{
UUID: uuid,
DeviceID: vol.GetDeviceID(),
- ReadOnly: cfgvol.ReadOnly || va.ReadOnly,
+ AllowWrite: !va.ReadOnly && !cfgvol.ReadOnly,
+ AllowTrash: !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
Replication: repl,
StorageClasses: sc,
},
vm.mounts = append(vm.mounts, mnt)
vm.mountMap[uuid] = mnt
vm.readables = append(vm.readables, mnt)
- if !mnt.KeepMount.ReadOnly {
+ if mnt.KeepMount.AllowWrite {
vm.writables = append(vm.writables, mnt)
}
+ logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol, mnt.AllowWrite, mnt.AllowTrash)
}
// pri(mnt): return highest priority of any storage class
// offered by mnt
}
func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
- if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
+ if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || mnt.AllowWrite) {
return mnt
}
return nil