21126: Add AllowTrashWhenReadOnly flag.
author Tom Clegg <tom@curii.com>
Thu, 26 Oct 2023 20:15:16 +0000 (16:15 -0400)
committer Tom Clegg <tom@curii.com>
Thu, 26 Oct 2023 20:15:16 +0000 (16:15 -0400)
Split KeepMount's ReadOnly flag into AllowWrite and AllowTrash flags.
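
The net effect, paraphrased here as a standalone sketch (not part of the
commit itself; cfgReadOnly, hostReadOnly, and allowTrashWhenReadOnly are
stand-ins for cfgvol.ReadOnly, va.ReadOnly, and
cfgvol.AllowTrashWhenReadOnly in makeRRVolumeManager below):

    package main

    import "fmt"

    // mountFlags mirrors the derivation added to makeRRVolumeManager
    // (services/keepstore/volume.go): a mount is writable only if neither
    // the volume config nor the per-host access entry is read-only, and
    // trashable if it is writable or the new AllowTrashWhenReadOnly flag
    // overrides a config-level ReadOnly.
    func mountFlags(cfgReadOnly, hostReadOnly, allowTrashWhenReadOnly bool) (allowWrite, allowTrash bool) {
            allowWrite = !hostReadOnly && !cfgReadOnly
            allowTrash = !hostReadOnly && (!cfgReadOnly || allowTrashWhenReadOnly)
            return
    }

    func main() {
            // A ReadOnly volume with the new flag set: writes stay
            // disabled, but keep-balance may still trash blocks on
            // (and keepstore may still empty trash on) this mount.
            w, t := mountFlags(true, false, true)
            fmt.Printf("AllowWrite=%v AllowTrash=%v\n", w, t) // AllowWrite=false AllowTrash=true
    }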

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

14 files changed:
lib/config/config.default.yml
sdk/go/arvados/config.go
sdk/go/arvados/keep_service.go
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keepstore/azure_blob_volume.go
services/keepstore/command.go
services/keepstore/handlers.go
services/keepstore/keepstore.go
services/keepstore/s3aws_volume.go
services/keepstore/trash_worker.go
services/keepstore/unix_volume.go
services/keepstore/volume.go

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 32727b1bce78226ac34e27d3eb9791c173a34cbc..593abad8541770b91a3ac3f0eb8fcbde1a7de877 100644
@@ -1684,6 +1684,7 @@ Clusters:
             ReadOnly: false
           "http://host1.example:25107": {}
         ReadOnly: false
+        AllowTrashWhenReadOnly: false
         Replication: 1
         StorageClasses:
           # If you have configured storage classes (see StorageClasses
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index a3e54952da483c46a87416b200291bc65b66304b..5db0222e27f4e08242354f2b93a05aeb65f87757 100644
@@ -316,12 +316,13 @@ type StorageClassConfig struct {
 }
 
 type Volume struct {
-       AccessViaHosts   map[URL]VolumeAccess
-       ReadOnly         bool
-       Replication      int
-       StorageClasses   map[string]bool
-       Driver           string
-       DriverParameters json.RawMessage
+       AccessViaHosts         map[URL]VolumeAccess
+       ReadOnly               bool
+       AllowTrashWhenReadOnly bool
+       Replication            int
+       StorageClasses         map[string]bool
+       Driver                 string
+       DriverParameters       json.RawMessage
 }
 
 type S3VolumeDriverParameters struct {
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
index 5b6d71a4fb73953109b884156d39e6143c92e4a4..85750d8cfc97d8530794c6c4dfdbfc78aa258bb4 100644
@@ -33,7 +33,8 @@ type KeepService struct {
 type KeepMount struct {
        UUID           string          `json:"uuid"`
        DeviceID       string          `json:"device_id"`
-       ReadOnly       bool            `json:"read_only"`
+       AllowWrite     bool            `json:"allow_write"`
+       AllowTrash     bool            `json:"allow_trash"`
        Replication    int             `json:"replication"`
        StorageClasses map[string]bool `json:"storage_classes"`
 }
diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
index 215c5e1b5be1355e9f6793da54ab0c3148eff242..e44dfeda8748aec51e18cd55118c15d509d8fc12 100644
@@ -227,7 +227,7 @@ func (bal *Balancer) cleanupMounts() {
        rwdev := map[string]*KeepService{}
        for _, srv := range bal.KeepServices {
                for _, mnt := range srv.mounts {
-                       if !mnt.ReadOnly {
+                       if mnt.AllowWrite {
                                rwdev[mnt.UUID] = srv
                        }
                }
@@ -237,7 +237,7 @@ func (bal *Balancer) cleanupMounts() {
        for _, srv := range bal.KeepServices {
                var dedup []*KeepMount
                for _, mnt := range srv.mounts {
-                       if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+                       if !mnt.AllowWrite && rwdev[mnt.UUID] != nil {
                                bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
                        } else {
                                dedup = append(dedup, mnt)
@@ -587,9 +587,11 @@ func (bal *Balancer) setupLookupTables() {
                for _, mnt := range srv.mounts {
                        bal.mounts++
 
-                       // All mounts on a read-only service are
-                       // effectively read-only.
-                       mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+                       if srv.ReadOnly {
+                               // All mounts on a read-only service
+                               // are effectively read-only.
+                               mnt.AllowWrite = false
+                       }
 
                        for class := range mnt.StorageClasses {
                                if mbc := bal.mountsByClass[class]; mbc == nil {
@@ -667,7 +669,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                        slots = append(slots, slot{
                                mnt:  mnt,
                                repl: repl,
-                               want: repl != nil && mnt.ReadOnly,
+                               want: repl != nil && !mnt.AllowTrash,
                        })
                }
        }
@@ -756,7 +758,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                                protMnt[slot.mnt] = true
                                replProt += slot.mnt.Replication
                        }
-                       if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+                       if replWant < desired && (slot.repl != nil || slot.mnt.AllowWrite) {
                                slots[i].want = true
                                wantSrv[slot.mnt.KeepService] = true
                                wantMnt[slot.mnt] = true
@@ -875,7 +877,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
                        lost = true
                        change = changeNone
-               case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+               case slot.repl == nil && slot.want && slot.mnt.AllowWrite:
                        slot.mnt.KeepService.AddPull(Pull{
                                SizedDigest: blkid,
                                From:        blk.Replicas[0].KeepMount.KeepService,
diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
index db9a668481105e7b16940dcebcc25761b153fc89..962bd40adefc8e360be0749ee5a700bcb727821a 100644
@@ -89,21 +89,29 @@ var stubMounts = map[string][]arvados.KeepMount{
                UUID:           "zzzzz-ivpuk-000000000000000",
                DeviceID:       "keep0-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep1.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-100000000000000",
                DeviceID:       "keep1-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep2.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-200000000000000",
                DeviceID:       "keep2-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep3.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-300000000000000",
                DeviceID:       "keep3-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
 }
 
diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
index f9fca1431b65f4c944618a37f76a7c8cfddcb8a1..cb61ea58cc492de7227047137ddcae3cdffed282 100644
@@ -87,6 +87,8 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                        KeepMount: arvados.KeepMount{
                                UUID:           fmt.Sprintf("zzzzz-mount-%015x", i),
                                StorageClasses: map[string]bool{"default": true},
+                               AllowWrite:     true,
+                               AllowTrash:     true,
                        },
                        KeepService: srv,
                }}
@@ -169,7 +171,8 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
                        KeepMount: arvados.KeepMount{
                                DeviceID:       bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
                                UUID:           bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
-                               ReadOnly:       readonly,
+                               AllowWrite:     !readonly,
+                               AllowTrash:     !readonly,
                                Replication:    1,
                                StorageClasses: map[string]bool{"default": true},
                        },
@@ -374,7 +377,7 @@ func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
 }
 
 func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
-       bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+       bal.srvs[3].mounts[0].KeepMount.AllowWrite = false
        bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
        bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
        bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
@@ -583,6 +586,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
        // classes=[special,special2].
        bal.srvs[9].mounts = []*KeepMount{{
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special": true},
                        UUID:           "zzzzz-mount-special00000009",
@@ -591,6 +596,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
                KeepService: bal.srvs[9],
        }, {
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special": true, "special2": true},
                        UUID:           "zzzzz-mount-special20000009",
@@ -603,6 +610,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
        // classes=[special3], one with classes=[default].
        bal.srvs[13].mounts = []*KeepMount{{
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special2": true},
                        UUID:           "zzzzz-mount-special2000000d",
@@ -611,6 +620,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
                KeepService: bal.srvs[13],
        }, {
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"default": true},
                        UUID:           "zzzzz-mount-00000000000000d",
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index 4fb469bdcbbba73be4c4aad728e2938939e11bb4..56a52c913a196149d3d4bbe03ec1f8f382018b72 100644
@@ -480,10 +480,9 @@ func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters
 
 // Trash a Keep block.
 func (v *AzureBlobVolume) Trash(loc string) error {
-       if v.volume.ReadOnly {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
-
        // Ideally we would use If-Unmodified-Since, but that
        // particular condition seems to be ignored by Azure. Instead,
        // we get the Etag before checking Mtime, and use If-Match to
diff --git a/services/keepstore/command.go b/services/keepstore/command.go
index 9761680fb5137e8601e1a389a918fadecf7a9bc3..db387f494b06c8d8b4e3fc1b38f7732e187e767f 100644
@@ -211,7 +211,7 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
        if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 &&
                h.Cluster.Collections.BlobTrash &&
                h.Cluster.Collections.BlobDeleteConcurrency > 0 {
-               go emptyTrash(h.volmgr.writables, d)
+               go emptyTrash(h.volmgr.mounts, d)
        }
 
        return nil
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 60fdde89c758c764df43c0e76e48277cf967a2f8..abeb20fe8624a79dfbe0b38159fa98a51119ae39 100644
@@ -475,8 +475,10 @@ func (rtr *router) handleDELETE(resp http.ResponseWriter, req *http.Request) {
                Deleted int `json:"copies_deleted"`
                Failed  int `json:"copies_failed"`
        }
-       for _, vol := range rtr.volmgr.AllWritable() {
-               if err := vol.Trash(hash); err == nil {
+       for _, vol := range rtr.volmgr.Mounts() {
+               if !vol.KeepMount.AllowTrash {
+                       continue
+               } else if err := vol.Trash(hash); err == nil {
                        result.Deleted++
                } else if os.IsNotExist(err) {
                        continue
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index f83ba603d2fd0edb874318176b0120d805c0cab0..953aa047cbfa6ab6f7b55630aa30e83732adaf0e 100644
@@ -49,7 +49,9 @@ func (e *KeepError) Error() string {
 func emptyTrash(mounts []*VolumeMount, interval time.Duration) {
        for range time.NewTicker(interval).C {
                for _, v := range mounts {
-                       v.EmptyTrash()
+                       if v.KeepMount.AllowTrash {
+                               v.EmptyTrash()
+                       }
                }
        }
 }
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
index aaec02721b47affd71f274a7d00bd3eee8ad0de3..18b30f463806f996639579689987502f354d411a 100644
@@ -846,7 +846,7 @@ func (b *s3AWSbucket) Del(path string) error {
 
 // Trash a Keep block.
 func (v *S3AWSVolume) Trash(loc string) error {
-       if v.volume.ReadOnly {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
        if t, err := v.Mtime(loc); err != nil {
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index d7c73127eb17caad038f64d50d60414f1db39685..5e8a5a963ceaad37527e652ca19460dce349fcd4 100644
@@ -36,10 +36,12 @@ func TrashItem(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arva
 
        var volumes []*VolumeMount
        if uuid := trashRequest.MountUUID; uuid == "" {
-               volumes = volmgr.AllWritable()
-       } else if mnt := volmgr.Lookup(uuid, true); mnt == nil {
+               volumes = volmgr.Mounts()
+       } else if mnt := volmgr.Lookup(uuid, false); mnt == nil {
                logger.Warnf("trash request for nonexistent mount: %v", trashRequest)
                return
+       } else if !mnt.KeepMount.AllowTrash {
+               logger.Warnf("trash request for mount with ReadOnly=true, AllowTrashWhenReadOnly=false: %v", trashRequest)
        } else {
                volumes = []*VolumeMount{mnt}
        }
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index 8c935dcddb249eb508450d17c1921ca6717ee931..dee4bdc1c1ed1d59badb076dedac2615eef3f2d7 100644
@@ -442,8 +442,7 @@ func (v *UnixVolume) Trash(loc string) error {
        // be re-written), or (b) Touch() will update the file's timestamp and
        // Trash() will read the correct up-to-date timestamp and choose not to
        // trash the file.
-
-       if v.volume.ReadOnly || !v.cluster.Collections.BlobTrash {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
        if err := v.lock(context.TODO()); err != nil {
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index c3b8cd6283e0311c93fcc914ebd3b370045cfa0f..f597ff578106544c54763c9847a3190b53154130 100644
@@ -316,8 +316,6 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                if err != nil {
                        return nil, fmt.Errorf("error initializing volume %s: %s", uuid, err)
                }
-               logger.Printf("started volume %s (%s), ReadOnly=%v", uuid, vol, cfgvol.ReadOnly || va.ReadOnly)
-
                sc := cfgvol.StorageClasses
                if len(sc) == 0 {
                        sc = map[string]bool{"default": true}
@@ -330,7 +328,8 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                        KeepMount: arvados.KeepMount{
                                UUID:           uuid,
                                DeviceID:       vol.GetDeviceID(),
-                               ReadOnly:       cfgvol.ReadOnly || va.ReadOnly,
+                               AllowWrite:     !va.ReadOnly && !cfgvol.ReadOnly,
+                               AllowTrash:     !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
                                Replication:    repl,
                                StorageClasses: sc,
                        },
@@ -340,9 +339,10 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                vm.mounts = append(vm.mounts, mnt)
                vm.mountMap[uuid] = mnt
                vm.readables = append(vm.readables, mnt)
-               if !mnt.KeepMount.ReadOnly {
+               if mnt.KeepMount.AllowWrite {
                        vm.writables = append(vm.writables, mnt)
                }
+               logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol, mnt.AllowWrite, mnt.AllowTrash)
        }
        // pri(mnt): return highest priority of any storage class
        // offered by mnt
@@ -382,7 +382,7 @@ func (vm *RRVolumeManager) Mounts() []*VolumeMount {
 }
 
 func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
-       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
+       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || mnt.AllowWrite) {
                return mnt
        }
        return nil